/******************************************************************************

  Copyright (c) 2001-2012, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixv.h"
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.1.4";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/
static ixv_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixv_probe(device_t);
static int	ixv_attach(device_t);
static int	ixv_detach(device_t);
static int	ixv_shutdown(device_t);
#if __FreeBSD_version < 800000
static void	ixv_start(struct ifnet *);
static void	ixv_start_locked(struct tx_ring *, struct ifnet *);
#else
static int	ixv_mq_start(struct ifnet *, struct mbuf *);
static int	ixv_mq_start_locked(struct ifnet *,
		    struct tx_ring *, struct mbuf *);
static void	ixv_qflush(struct ifnet *);
#endif
static int	ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixv_init(void *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_stop(void *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static void	ixv_identify_hardware(struct adapter *);
static int	ixv_allocate_pci_resources(struct adapter *);
static int	ixv_allocate_msix(struct adapter *);
static int	ixv_allocate_queues(struct adapter *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_config_link(struct adapter *);

static int	ixv_allocate_transmit_buffers(struct tx_ring *);
static int	ixv_setup_transmit_structures(struct adapter *);
static void	ixv_setup_transmit_ring(struct tx_ring *);
static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_free_transmit_structures(struct adapter *);
static void	ixv_free_transmit_buffers(struct tx_ring *);

static int	ixv_allocate_receive_buffers(struct rx_ring *);
static int	ixv_setup_receive_structures(struct adapter *);
static int	ixv_setup_receive_ring(struct rx_ring *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_free_receive_structures(struct adapter *);
static void	ixv_free_receive_buffers(struct rx_ring *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static bool	ixv_txeof(struct tx_ring *);
static bool	ixv_rxeof(struct ix_queue *, int);
static void	ixv_rx_checksum(u32, struct mbuf *, u32);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static void	ixv_refresh_mbufs(struct rx_ring *, int);
static int	ixv_xmit(struct tx_ring *, struct mbuf **);
static int	ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int	ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixv_dma_malloc(struct adapter *, bus_size_t,
		    struct ixv_dma_alloc *, int);
static void	ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
static void	ixv_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
static bool	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixv_setup_vlan_support(struct adapter *);
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static __inline void	ixv_rx_discard(struct rx_ring *, int);
static __inline void	ixv_rx_input(struct rx_ring *, struct ifnet *,
			    struct mbuf *, u32);
/* The MSI/X Interrupt handlers */
static void	ixv_msix_que(void *);
static void	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *, int);
static void	ixv_handle_mbx(void *, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	{0, 0}
};
static driver_t ixv_driver = {
	"ix", ixv_methods, sizeof(struct adapter),
};
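
/*
 * Register the VF driver on the PCI bus; note that it shares
 * the devclass of the physical-function ixgbe driver.
 */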
extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload;
 * it can be a performance win in some workloads, but
 * in others it actually hurts, so it's off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
/*
** Shadow VFTA table, this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];
/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  this adapter based on its PCI vendor/device id.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/
static int
ixv_probe(device_t dev)
{
	ixv_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixv_strings[ent->index],
			    ixv_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixv_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int		error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init */
	IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLTYPE_INT | CTLFLAG_RW,
	    &ixv_enable_aim, 1, "Interrupt Moderation");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixv_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: it is
	** at this point that the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Shared Code Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXV_FC_PAUSE;
	hw->fc.low_water[0] = IXV_FC_LO;
	hw->fc.high_water[0] = IXV_FC_HI;
	hw->fc.send_xon = TRUE;

	error = ixgbe_init_hw(hw);
	if (error) {
		device_printf(dev, "Hardware Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Sysctl for limiting the amount of work done in the taskqueue */
	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    ixv_rx_process_limit);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	INIT_DEBUGOUT("ixv_attach: end");
	return (0);

err_late:
	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);

	IXV_CORE_LOCK_DESTROY(adapter);
	return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);
	return (0);
}
#if __FreeBSD_version < 800000
/*********************************************************************
 *  Transmit entry point
 *
 *  ixv_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available the stack is notified
 *  and the packet is requeued.
 **********************************************************************/
static void
ixv_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXV_TX_LOCK_ASSERT(txr);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (ixv_xmit(txr, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
	}
	return;
}
/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixv_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXV_TX_LOCK(txr);
		ixv_start_locked(txr, ifp);
		IXV_TX_UNLOCK(txr);
	}
	return;
}

#else
/*
** Multiqueue Transmit driver
**
*/
static int
ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int		i = 0, err = 0;

	/* Which queue to use */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	if (IXV_TX_TRYLOCK(txr)) {
		err = ixv_mq_start_locked(ifp, txr, m);
		IXV_TX_UNLOCK(txr);
	} else {
		err = drbr_enqueue(ifp, txr->br, m);
		taskqueue_enqueue(que->tq, &que->que_task);
	}

	return (err);
}
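
/*
 * Locked multiqueue transmit: drains the buf_ring for this
 * ring, handing each frame to ixv_xmit(); frames that cannot
 * be sent are requeued on the buf_ring.
 */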
static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter	*adapter = txr->adapter;
	struct mbuf	*next;
	int		enqueued, err = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	/* Do a clean if descriptors are low */
	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
		ixv_txeof(txr);

	enqueued = 0;
	if (m == NULL) {
		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
			return (err);
		next = drbr_dequeue(ifp, txr->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		if ((err = ixv_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
			break;
		}
		enqueued++;
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		next = drbr_dequeue(ifp, txr->br);
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
	}

	return (err);
}
/*
** Flush all ring buffers
*/
static void
ixv_qflush(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;
	struct mbuf    *m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXV_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}

#endif
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *) data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXV_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXV_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXV_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixv_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixv_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXV_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
	return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
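
/* Enable a queue's interrupt by writing its bit to the VTEIMS mask register */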
static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	queue = 1 << vector;
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}
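
/* Disable a queue's interrupt by writing its bit to the VTEIMC mask register */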
static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)(1 << vector);
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}
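
/* Trigger software interrupts on the given queues via the VTEICS register */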
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
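
/*
 * Deferred queue task: runs RX/TX cleanup outside interrupt
 * context, restarting transmit if frames are waiting, and
 * reschedules itself while receive work remains.
 */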
static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixv_rxeof(que, adapter->rx_process_limit);
		IXV_TX_LOCK(txr);
		ixv_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixv_mq_start_locked(ifp, txr, NULL);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixv_start_locked(txr, ifp);
#endif
		IXV_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more_tx, more_rx;
	u32		newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	IXV_TX_LOCK(txr);
	more_tx = ixv_txeof(txr);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
#if __FreeBSD_version < 800000
	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
#else
	if (!drbr_empty(adapter->ifp, txr->br))
#endif
		more_tx = 1;
	IXV_TX_UNLOCK(txr);

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}
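
/* MSIX mailbox/link interrupt: defers the real work to the mbx tasklet */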
static void
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg;

	++adapter->mbx_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXV_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXV_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_FDX;
		break;
	}

	IXV_CORE_UNLOCK(adapter);

	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
}
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *	- return 0 on success, positive on failure
 *
 **********************************************************************/
static int
ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
	struct adapter	*adapter = txr->adapter;
	u32		olinfo_status = 0, cmd_type_len;
	u32		paylen = 0;
	int		i, j, error, nsegs;
	int		first, last = 0;
	struct mbuf	*m_head;
	bus_dma_segment_t segs[32];
	bus_dmamap_t	map;
	struct ixv_tx_buf *txbuf, *txbuf_mapped;
	union ixgbe_adv_tx_desc *txd = NULL;

	m_head = *m_headp;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	txbuf_mapped = txbuf;
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			adapter->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			adapter->no_tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		adapter->no_tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		adapter->no_tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->tx_avail - 2) {
		txr->no_desc_avail++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/*
	** Set up the appropriate offload context;
	** this becomes the first descriptor of
	** a packet.
	*/
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ixv_tso_setup(txr, m_head, &paylen)) {
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
			++adapter->tso_tx;
		} else
			return (ENXIO);
	} else if (ixv_tx_ctx_setup(txr, m_head))
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

	/* Record payload length */
	if (paylen == 0)
		olinfo_status |= m_head->m_pkthdr.len <<
		    IXGBE_ADVTXD_PAYLEN_SHIFT;

	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		last = i; /* descriptor that will get completion IRQ */

		if (++i == adapter->num_tx_desc)
			i = 0;

		txbuf->m_head = NULL;
		txbuf->eop_index = -1;
	}

	txd->read.cmd_type_len |=
	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop_index = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);

	return (0);

xmit_fail:
	bus_dmamap_unload(txr->txtag, txbuf->map);
	return (error);
}
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list
 *  is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8	*update_ptr;
	struct ifmultiaddr *ifma;
	int	mcnt = 0;
	struct ifnet *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	update_ptr = mta;

	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);

	return;
}

/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;
	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
	return addr;
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct tx_ring	*txr = adapter->tx_rings;
	int		i;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * If the interface has been paused
	 * then don't do the watchdog check
	 */
	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
		goto out;
	/*
	** Check for time since any descriptor was cleaned
	*/
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		if (txr->watchdog_check == FALSE) {
			IXV_TX_UNLOCK(txr);
			continue;
		}
		if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
			goto hung;
		IXV_TX_UNLOCK(txr);
	}
out:
	ixv_rearm_queues(adapter, adapter->que_mask);
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

hung:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
	device_printf(dev, "TX(%d) desc avail = %d,"
	    " Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	IXV_TX_UNLOCK(txr);
	ixv_init_locked(adapter);
}
/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	device_t	dev = adapter->dev;

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			for (int i = 0; i < adapter->num_queues;
			    i++, txr++)
				txr->watchdog_check = FALSE;
		}
	}

	return;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/
static void
ixv_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	u16	 pci_cmd_word;

	/*
	** Make sure BUSMASTER is set, on a VM under
	** KVM it may not be and will break things.
	*/
	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	    (pci_cmd_word & PCIM_CMD_MEMEN))) {
		INIT_DEBUGOUT("Memory Access and/or Bus Master "
		    "bits were not set!\n");
		pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	return;
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	int		error, rid, vector = 0;

	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
		rid = vector + 1;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register QUE handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
		adapter->que_mask |= (u64)(1 << que->msix);
		/*
		** Bind the msix vector, and thus the
		** ring to the corresponding cpu.
		*/
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);

		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));
	}

	/* and Mailbox */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: MBX interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixv_msix_mbx, adapter, &adapter->tag);
	if (error) {
		adapter->res = NULL;
		device_printf(dev, "Failed to register LINK handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
	adapter->mbxvec = vector;
	/* Tasklets for Mailbox */
	TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
	    device_get_nameunit(adapter->dev));

	/*
	** Due to a broken design QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);
}
/*
 * Setup MSIX resources, note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int	 rid, vectors, want = 2;

	/* First try MSI/X */
	rid = PCIR_BAR(MSIX_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!adapter->msix_mem) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto out;
	}

	vectors = pci_msix_count(dev);
	if (vectors < 2) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
		goto out;
	}

	/*
	** Want two vectors: one for a queue,
	** plus an additional for mailbox.
	*/
	if (pci_alloc_msix(dev, &want) == 0) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}
out:
	device_printf(adapter->dev, "MSIX config error\n");
	return (ENXIO);
}
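
/*
 * Map BAR0 and kick off MSIX setup; the VF uses a single
 * queue pair plus the mailbox vector.
 */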
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int		rid;
	device_t	dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

	adapter->num_queues = 1;
	adapter->hw.back = &adapter->osdep;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
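
/* Release interrupt vectors, the MSIX table, and the BAR0 mapping */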
static void
ixv_free_pci_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	memrid = PCIR_BAR(MSIX_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below.
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->mbxvec) /* we are doing MSIX */
		rid = adapter->mbxvec + 1;
	else
		(adapter->msix != 0) ? (rid = 1) : (rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixv_mq_start;
	ifp->if_qflush = ixv_qflush;
#else
	ifp->if_start = ixv_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
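
/* Check the current link state and kick off any needed link setup */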
static void
ixv_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	autoneg, err = 0;
	bool	negotiate = TRUE;

	if (hw->mac.ops.check_link)
		err = hw->mac.ops.check_link(hw, &autoneg,
		    &adapter->link_up, FALSE);
	if (err)
		goto out;

	if (hw->mac.ops.setup_link)
		err = hw->mac.ops.setup_link(hw, autoneg,
		    negotiate, adapter->link_up);
out:
	return;
}
/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixv_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}
static int
ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
	       struct ixv_dma_alloc *dma, int mapflags)
{
	device_t dev = adapter->dev;
	int	 r;

	r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),	/* parent */
			       DBA_ALIGN, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(dev, "ixv_dma_malloc: bus_dma_tag_create failed; "
		    "error %u\n", r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(dev, "ixv_dma_malloc: bus_dmamem_alloc failed; "
		    "error %u\n", r);
		goto fail_1;
	}
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size,
	    ixv_dmamap_cb,
	    &dma->dma_paddr,
	    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(dev, "ixv_dma_malloc: bus_dmamap_load failed; "
		    "error %u\n", r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}
static void
ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
{
	bus_dmamap_sync(dma->dma_tag, dma->dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
	return;
}
/*********************************************************************
 *
 *  Allocate memory for the transmit and receive rings, and then
 *  the descriptors associated with each, called only once at attach.
 *
 **********************************************************************/
static int
ixv_allocate_queues(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	int rsize, tsize, error = 0;
	int txconf = 0, rxconf = 0;

	/* First allocate the top level queue structs */
	if (!(adapter->queues =
	    (struct ix_queue *) malloc(sizeof(struct ix_queue) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* First allocate the TX ring struct memory */
	if (!(adapter->tx_rings =
	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX ring memory\n");
		error = ENOMEM;
		goto tx_fail;
	}

	/* Next allocate the RX */
	if (!(adapter->rx_rings =
	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto rx_fail;
	}

	/* For the ring itself */
	tsize = roundup2(adapter->num_tx_desc *
	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);

	/*
	 * Now set up the TX queues, txconf is needed to handle the
	 * possibility that things fail midcourse and we need to
	 * undo memory gracefully
	 */
	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
		/* Set up some basics */
		txr = &adapter->tx_rings[i];
		txr->adapter = adapter;
		txr->me = i;

		/* Initialize the TX side lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), txr->me);
		mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, tsize,
		    &txr->txdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);

		/* Now allocate transmit buffers for the ring */
		if (ixv_allocate_transmit_buffers(txr)) {
			device_printf(dev,
			    "Critical Failure setting up transmit buffers\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#if __FreeBSD_version >= 800000
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
		    M_WAITOK, &txr->tx_mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up buf ring\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#endif
	}

	/*
	 * Next the RX queues...
	 */
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
		rxr = &adapter->rx_rings[i];
		/* Set up some basics */
		rxr->adapter = adapter;
		rxr->me = i;

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), rxr->me);
		mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, rsize,
		    &rxr->rxdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate RxDescriptor memory\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);

		/* Allocate receive buffers for the ring */
		if (ixv_allocate_receive_buffers(rxr)) {
			device_printf(dev,
			    "Critical Failure setting up receive buffers\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
	}

	/*
	** Finally set up the queue holding structs
	*/
	for (int i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		que->adapter = adapter;
		que->txr = &adapter->tx_rings[i];
		que->rxr = &adapter->rx_rings[i];
	}

	return (0);

err_rx_desc:
	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
		ixv_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
		ixv_dma_free(adapter, &txr->txdma);
	free(adapter->rx_rings, M_DEVBUF);
rx_fail:
	free(adapter->tx_rings, M_DEVBUF);
tx_fail:
	free(adapter->queues, M_DEVBUF);
fail:
	return (error);
}
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/
static int
ixv_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	device_t dev = adapter->dev;
	struct ixv_tx_buf *txbuf;
	int error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((error = bus_dma_tag_create(
			       bus_get_dma_tag(adapter->dev),	/* parent */
			       1, 0,		/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       IXV_TSO_SIZE,	/* maxsize */
			       32,		/* nsegments */
			       PAGE_SIZE,	/* maxsegsize */
			       0,		/* flags */
			       NULL,		/* lockfunc */
			       NULL,		/* lockfuncarg */
			       &txr->txtag))) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	if (!(txr->tx_buffers =
	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all, it handles case where we are in the middle */
	ixv_free_transmit_structures(adapter);
	return (error);
}
/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixv_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *txbuf;
	int i;

	/* Clear the old ring contents */
	IXV_TX_LOCK(txr);
	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, txbuf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
		/* Clear the EOP index */
		txbuf->eop_index = -1;
	}

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXV_TX_UNLOCK(txr);
}
/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
static int
ixv_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixv_setup_transmit_ring(txr);

	return (0);
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64	tdba = txr->txdma.dma_paddr;
		u32	txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Setup Transmit Descriptor Cmd Settings */
		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
		txr->watchdog_check = FALSE;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
	}

	return;
}
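/*
** The ring base is a 64-bit bus address split across two 32-bit
** registers: for example tdba == 0x0000000123456780 writes 0x23456780
** to VFTDBAL and 0x00000001 to VFTDBAH. VFTDLEN takes the ring size in
** bytes, which is why num_tx_desc is scaled by the descriptor size.
*/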
/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
static void
ixv_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		ixv_free_transmit_buffers(txr);
		ixv_dma_free(adapter, &txr->txdma);
		IXV_TX_UNLOCK(txr);
		IXV_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
static void
ixv_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *tx_buffer;
	int i;

	INIT_DEBUGOUT("free_transmit_ring: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			bus_dmamap_destroy(txr->txtag,
			    tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
#if __FreeBSD_version >= 800000
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
#endif
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		bus_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
	return;
}
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN or CSUM
 *
 **********************************************************************/
static boolean_t
ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	int ehdrlen, ip_hlen = 0;
	u16 etype;
	u8 ipproto = 0;
	bool offload = TRUE;
	int ctxd = txr->next_avail_desc;
	u16 vtag = 0;

	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		offload = FALSE;

	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the descriptor itself.
	*/
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (offload == FALSE)
		return FALSE;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	switch (etype) {
		case ETHERTYPE_IP:
			ip = (struct ip *)(mp->m_data + ehdrlen);
			ip_hlen = ip->ip_hl << 2;
			if (mp->m_len < ehdrlen + ip_hlen)
				return FALSE;
			ipproto = ip->ip_p;
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
			break;
		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
			ip_hlen = sizeof(struct ip6_hdr);
			if (mp->m_len < ehdrlen + ip_hlen)
				return FALSE;
			ipproto = ip6->ip6_nxt;
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
			break;
		default:
			offload = FALSE;
			break;
	}

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	switch (ipproto) {
		case IPPROTO_TCP:
			if (mp->m_pkthdr.csum_flags & CSUM_TCP)
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			break;
		case IPPROTO_UDP:
			if (mp->m_pkthdr.csum_flags & CSUM_UDP)
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
			break;
#if __FreeBSD_version >= 800000
		case IPPROTO_SCTP:
			if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
#endif
		default:
			offload = FALSE;
			break;
	}

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (offload);
}
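/*
** Field layout sketch, assuming the usual ixgbe_type.h shift values
** (MACLEN shift 9, VLAN shift 16): vlan_macip_lens packs IPLEN in the
** low bits, MACLEN above it, and the VLAN tag on top, so a plain
** Ethernet + option-less IPv4 frame yields (14 << 9) | 20.
*/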
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static boolean_t
ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0;
	u16 vtag = 0;
	int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct tcphdr *th;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	else
		ehdrlen = ETHER_HDR_LEN;

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;

	ctxd = txr->next_avail_desc;
	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	ip = (struct ip *)(mp->m_data + ehdrlen);
	if (ip->ip_p != IPPROTO_TCP)
		return FALSE;	/* 0 */
	ip_hlen = ip->ip_hl << 2;
	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
	th->th_sum = in_pseudo(ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	/* This is used in the transmit desc in encap */
	*paylen = mp->m_pkthdr.len - hdrlen;

	/* VLAN MACLEN IPLEN */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);
	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	return TRUE;
}
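/*
** mss_l4len_idx sketch, assuming the usual ixgbe_type.h shift values
** (L4LEN shift 8, MSS shift 16): a 1448-byte MSS with a 20-byte TCP
** header yields (1448 << 16) | (20 << 8). The hardware uses these
** fields to slice the payload into MSS-sized frames, which is also
** why *paylen excludes the replicated headers.
*/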
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static boolean_t
ixv_txeof(struct tx_ring *txr)
{
	struct adapter	*adapter = txr->adapter;
	struct ifnet	*ifp = adapter->ifp;
	u32	first, last, done;
	struct ixv_tx_buf *tx_buffer;
	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;

	mtx_assert(&txr->tx_mtx, MA_OWNED);

	if (txr->tx_avail == adapter->num_tx_desc)
		return FALSE;

	first = txr->next_to_clean;
	tx_buffer = &txr->tx_buffers[first];
	/* For cleanup we just use legacy struct */
	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
	last = tx_buffer->eop_index;
	if (last == -1)
		return FALSE;
	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];

	/*
	** Get the index of the first descriptor
	** BEYOND the EOP and call that 'done'.
	** I do this so the comparison in the
	** inner while loop below can be simple
	*/
	if (++last == adapter->num_tx_desc) last = 0;
	done = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);
	/*
	** Only the EOP descriptor of a packet now has the DD
	** bit set, this is what we look for...
	*/
	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++txr->tx_avail;

			if (tx_buffer->m_head) {
				bus_dmamap_sync(txr->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txr->txtag,
				    tx_buffer->map);
				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
				tx_buffer->map = NULL;
			}
			tx_buffer->eop_index = -1;
			txr->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &txr->tx_buffers[first];
			tx_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
		}
		++ifp->if_opackets;
		/* See if there is more work now */
		last = tx_buffer->eop_index;
		if (last != -1) {
			eop_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
			/* Get next done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	txr->next_to_clean = first;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (txr->tx_avail == adapter->num_tx_desc) {
			txr->watchdog_check = FALSE;
			return FALSE;
		}
	}

	return TRUE;
}
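/*
** Cleanup walks whole packets at a time: eop_index, stored at transmit
** time, points at a packet's last descriptor, and only that
** descriptor's DD (descriptor done) bit is written back by the
** hardware. For example, a packet occupying slots 5..7 is reclaimed
** only when slot 7 reports DD, at which point slots 5, 6 and 7 are all
** freed and 'done' advances to 8.
*/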
/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary; if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, so it can simply
 *     be called again later to retry.
 *
 **********************************************************************/
static void
ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
{
	struct adapter		*adapter = rxr->adapter;
	bus_dma_segment_t	hseg[1];
	bus_dma_segment_t	pseg[1];
	struct ixv_rx_buf	*rxbuf;
	struct mbuf		*mh, *mp;
	int			i, j, nsegs, error;
	bool			refreshed = FALSE;

	i = j = rxr->next_to_refresh;
	/* Get the control variable, one beyond refresh point */
	if (++j == adapter->num_rx_desc)
		j = 0;
	while (j != limit) {
		rxbuf = &rxr->rx_buffers[i];
		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
			mh = m_gethdr(M_DONTWAIT, MT_DATA);
			if (mh == NULL)
				goto update;
			mh->m_pkthdr.len = mh->m_len = MHLEN;
			mh->m_flags |= M_PKTHDR;
			m_adj(mh, ETHER_ALIGN);
			/* Get the memory mapping */
			error = bus_dmamap_load_mbuf_sg(rxr->htag,
			    rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("GET BUF: dmamap load"
				    " failure - %d\n", error);
				m_free(mh);
				goto update;
			}
			rxbuf->m_head = mh;
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_PREREAD);
			rxr->rx_base[i].read.hdr_addr =
			    htole64(hseg[0].ds_addr);
		}

		if (rxbuf->m_pack == NULL) {
			mp = m_getjcl(M_DONTWAIT, MT_DATA,
			    M_PKTHDR, adapter->rx_mbuf_sz);
			if (mp == NULL)
				goto update;
			mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
			/* Get the memory mapping */
			error = bus_dmamap_load_mbuf_sg(rxr->ptag,
			    rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("GET BUF: dmamap load"
				    " failure - %d\n", error);
				m_free(mp);
				rxbuf->m_pack = NULL;
				goto update;
			}
			rxbuf->m_pack = mp;
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_PREREAD);
			rxr->rx_base[i].read.pkt_addr =
			    htole64(pseg[0].ds_addr);
		}

		refreshed = TRUE;
		rxr->next_to_refresh = i = j;
		/* Calculate next index */
		if (++j == adapter->num_rx_desc)
			j = 0;
	}
update:
	if (refreshed) /* update tail index */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
	return;
}
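/*
** RDT is the producer index: the hardware owns every descriptor from
** its head up to (but not including) the tail written here, so the
** write only advances once buffers are actually mapped. Leaving RDT
** untouched on an allocation failure keeps the hardware off the stale
** slots until a later call can refill them.
*/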
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
ixv_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct adapter *adapter = rxr->adapter;
	device_t dev = adapter->dev;
	struct ixv_rx_buf *rxbuf;
	int i, bsize, error;

	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
	if (!(rxr->rx_buffers =
	    (struct ixv_rx_buf *) malloc(bsize,
	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			1, 0,			/* alignment, bounds */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MSIZE,			/* maxsize */
			1,			/* nsegments */
			MSIZE,			/* maxsegsize */
			0,			/* flags */
			NULL,			/* lockfunc */
			NULL,			/* lockfuncarg */
			&rxr->htag))) {
		device_printf(dev, "Unable to create RX DMA tag\n");
		goto fail;
	}

	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			1, 0,			/* alignment, bounds */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MJUMPAGESIZE,		/* maxsize */
			1,			/* nsegments */
			MJUMPAGESIZE,		/* maxsegsize */
			0,			/* flags */
			NULL,			/* lockfunc */
			NULL,			/* lockfuncarg */
			&rxr->ptag))) {
		device_printf(dev, "Unable to create RX DMA tag\n");
		goto fail;
	}

	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
		rxbuf = &rxr->rx_buffers[i];
		error = bus_dmamap_create(rxr->htag,
		    BUS_DMA_NOWAIT, &rxbuf->hmap);
		if (error) {
			device_printf(dev, "Unable to create RX head map\n");
			goto fail;
		}
		error = bus_dmamap_create(rxr->ptag,
		    BUS_DMA_NOWAIT, &rxbuf->pmap);
		if (error) {
			device_printf(dev, "Unable to create RX pkt map\n");
			goto fail;
		}
	}

	return (0);

fail:
	/* Frees all, but can handle partial completion */
	ixv_free_receive_structures(adapter);
	return (error);
}
static void
ixv_free_receive_ring(struct rx_ring *rxr)
{
	struct adapter		*adapter;
	struct ixv_rx_buf	*rxbuf;
	int			i;

	adapter = rxr->adapter;
	for (i = 0; i < adapter->num_rx_desc; i++) {
		rxbuf = &rxr->rx_buffers[i];
		if (rxbuf->m_head != NULL) {
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->htag, rxbuf->hmap);
			rxbuf->m_head->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_head);
		}
		if (rxbuf->m_pack != NULL) {
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
			rxbuf->m_pack->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_pack);
		}
		rxbuf->m_head = NULL;
		rxbuf->m_pack = NULL;
	}
}
/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
static int
ixv_setup_receive_ring(struct rx_ring *rxr)
{
	struct adapter		*adapter;
	struct ifnet		*ifp;
	device_t		dev;
	struct ixv_rx_buf	*rxbuf;
	bus_dma_segment_t	pseg[1], hseg[1];
	struct lro_ctrl		*lro = &rxr->lro;
	int			rsize, nsegs, error = 0;

	adapter = rxr->adapter;
	ifp = adapter->ifp;
	dev = adapter->dev;

	/* Clear the ring contents */
	IXV_RX_LOCK(rxr);
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	bzero((void *)rxr->rx_base, rsize);

	/* Free current RX buffer structs and their mbufs */
	ixv_free_receive_ring(rxr);

	/* Configure header split? */
	if (ixv_header_split)
		rxr->hdr_split = TRUE;

	/* Now replenish the mbufs */
	for (int j = 0; j != adapter->num_rx_desc; ++j) {
		struct mbuf	*mh, *mp;

		rxbuf = &rxr->rx_buffers[j];
		/*
		** Don't allocate header mbufs if not
		** doing header split; it's wasteful
		*/
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
		if (rxbuf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m_adj(rxbuf->m_head, ETHER_ALIGN);
		mh = rxbuf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    rxbuf->hmap, rxbuf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag,
		    rxbuf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
		    M_PKTHDR, adapter->rx_mbuf_sz);
		if (rxbuf->m_pack == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		mp = rxbuf->m_pack;
		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
			goto fail;
		bus_dmamap_sync(rxr->ptag,
		    rxbuf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->next_to_refresh = 0;
	rxr->lro_enabled = FALSE;
	rxr->rx_split_packets = 0;
	rxr->rx_bytes = 0;
	rxr->discard = FALSE;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	** Now set up the LRO interface:
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		int err = tcp_lro_init(lro);
		if (err) {
			device_printf(dev, "LRO Initialization failed!\n");
			goto fail;
		}
		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
		rxr->lro_enabled = TRUE;
		lro->ifp = adapter->ifp;
	}

	IXV_RX_UNLOCK(rxr);
	return (0);

fail:
	ixv_free_receive_ring(rxr);
	IXV_RX_UNLOCK(rxr);
	return (error);
}
/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
ixv_setup_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	int j;

	for (j = 0; j < adapter->num_queues; j++, rxr++)
		if (ixv_setup_receive_ring(rxr))
			goto fail;

	return (0);
fail:
	/*
	 * Free RX buffers allocated so far, we will only handle
	 * the rings that completed, the failing case will have
	 * cleaned up for itself. 'j' failed, so it's the terminus.
	 */
	for (int i = 0; i < j; ++i) {
		rxr = &adapter->rx_rings[i];
		ixv_free_receive_ring(rxr);
	}

	return (ENOBUFS);
}
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, fctrl, rxcsum, hlreg;

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU) {
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	} else {
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Do the queue enabling first */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		if (rxr->hdr_split) {
			/* Use a standard mbuf for the header */
			reg |= ((IXV_RX_HDR <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		} else
			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
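/*
** SRRCTL packet buffer sizes are encoded in 1 KB units, assuming the
** usual ixgbe_type.h value of IXGBE_SRRCTL_BSIZEPKT_SHIFT (10): the
** bufsz computed above is 2048 >> 10 == 2 for standard frames and
** 4096 >> 10 == 4 for jumbo, while the header buffer size uses the
** separate IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT defined before the function.
*/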
/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
static void
ixv_free_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		struct lro_ctrl *lro = &rxr->lro;
		ixv_free_receive_buffers(rxr);
		/* Free LRO memory */
		tcp_lro_free(lro);
		/* Free the ring memory as well */
		ixv_dma_free(adapter, &rxr->rxdma);
	}

	free(adapter->rx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
static void
ixv_free_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	struct ixv_rx_buf	*rxbuf;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Cleanup any existing buffers */
	if (rxr->rx_buffers != NULL) {
		for (int i = 0; i < adapter->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->m_head != NULL) {
				bus_dmamap_sync(rxr->htag, rxbuf->hmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->htag, rxbuf->hmap);
				rxbuf->m_head->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_head);
			}
			if (rxbuf->m_pack != NULL) {
				bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
				rxbuf->m_pack->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_pack);
			}
			rxbuf->m_head = NULL;
			rxbuf->m_pack = NULL;
			if (rxbuf->hmap != NULL) {
				bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
				rxbuf->hmap = NULL;
			}
			if (rxbuf->pmap != NULL) {
				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
				rxbuf->pmap = NULL;
			}
		}
		if (rxr->rx_buffers != NULL) {
			free(rxr->rx_buffers, M_DEVBUF);
			rxr->rx_buffers = NULL;
		}
	}

	if (rxr->htag != NULL) {
		bus_dma_tag_destroy(rxr->htag);
		rxr->htag = NULL;
	}
	if (rxr->ptag != NULL) {
		bus_dma_tag_destroy(rxr->ptag);
		rxr->ptag = NULL;
	}

	return;
}
static __inline void
ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{
	/*
	 * ATM LRO is only for IPv4/TCP packets and the TCP checksum of the
	 * packet should be computed by hardware. Also it should not have
	 * a VLAN tag in its ether header.
	 */
	if (rxr->lro_enabled &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		** Send to the stack if:
		**  - LRO not enabled, or
		**  - no LRO resources, or
		**  - lro enqueue fails
		*/
		if (rxr->lro.lro_cnt != 0)
			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
				return;
	}
	(*ifp->if_input)(ifp, m);
}
static __inline void
ixv_rx_discard(struct rx_ring *rxr, int i)
{
	struct ixv_rx_buf	*rbuf;

	rbuf = &rxr->rx_buffers[i];

	if (rbuf->fmp != NULL) {/* Partial chain ? */
		rbuf->fmp->m_flags |= M_PKTHDR;
		m_freem(rbuf->fmp);
		rbuf->fmp = NULL;
	}

	/*
	** With advanced descriptors the writeback
	** clobbers the buffer addrs, so it's easier
	** to just free the existing mbufs and take
	** the normal refresh path to get new buffers
	** and mapping.
	*/
	if (rbuf->m_head) {
		m_free(rbuf->m_head);
		rbuf->m_head = NULL;
	}

	if (rbuf->m_pack) {
		m_free(rbuf->m_pack);
		rbuf->m_pack = NULL;
	}

	return;
}
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *********************************************************************/
static bool
ixv_rxeof(struct ix_queue *que, int count)
{
	struct adapter		*adapter = que->adapter;
	struct rx_ring		*rxr = que->rxr;
	struct ifnet		*ifp = adapter->ifp;
	struct lro_ctrl		*lro = &rxr->lro;
	struct lro_entry	*queued;
	int			i, nextp, processed = 0;
	u32			staterr = 0;
	union ixgbe_adv_rx_desc	*cur;
	struct ixv_rx_buf	*rbuf, *nbuf;

	IXV_RX_LOCK(rxr);

	for (i = rxr->next_to_check; count != 0;) {
		struct mbuf	*sendmp, *mh, *mp;
		u32		ptype;
		u16		hlen, plen, hdr, vtag;
		bool		eop;

		/* Sync the ring. */
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		count--;
		sendmp = NULL;
		nbuf = NULL;
		cur->wb.upper.status_error = 0;
		rbuf = &rxr->rx_buffers[i];
		mh = rbuf->m_head;
		mp = rbuf->m_pack;

		plen = le16toh(cur->wb.upper.length);
		ptype = le32toh(cur->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_PKTTYPE_MASK;
		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
		vtag = le16toh(cur->wb.upper.vlan);
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		/* Make sure all parts of a bad packet are discarded */
		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
		    (rxr->discard)) {
			ifp->if_ierrors++;
			rxr->rx_discarded++;
			if (!eop)
				rxr->discard = TRUE;
			else
				rxr->discard = FALSE;
			ixv_rx_discard(rxr, i);
			goto next_desc;
		}

		if (!eop) {
			nextp = i + 1;
			if (nextp == adapter->num_rx_desc)
				nextp = 0;
			nbuf = &rxr->rx_buffers[nextp];
		}
		/*
		** The header mbuf is ONLY used when header
		** split is enabled, otherwise we get normal
		** behavior, ie, both header and payload
		** are DMA'd into the payload buffer.
		**
		** Rather than using the fmp/lmp global pointers
		** we now keep the head of a packet chain in the
		** buffer struct and pass this along from one
		** descriptor to the next, until we get EOP.
		*/
		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
			/* This must be an initial descriptor */
			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hlen > IXV_RX_HDR)
				hlen = IXV_RX_HDR;
			mh->m_len = hlen;
			mh->m_flags |= M_PKTHDR;
			mh->m_next = NULL;
			mh->m_pkthdr.len = mh->m_len;
			/* Null buf pointer so it is refreshed */
			rbuf->m_head = NULL;
			/*
			** Check the payload length, this
			** could be zero if it's a small
			** packet.
			*/
			if (plen > 0) {
				mp->m_len = plen;
				mp->m_next = NULL;
				mp->m_flags &= ~M_PKTHDR;
				mh->m_next = mp;
				mh->m_pkthdr.len += mp->m_len;
				/* Null buf pointer so it is refreshed */
				rbuf->m_pack = NULL;
				rxr->rx_split_packets++;
			}
			/*
			** Now create the forward
			** chain so when complete
			** we won't have to.
			*/
			if (eop == 0) {
				/* stash the chain head */
				nbuf->fmp = mh;
				/* Make forward chain */
				if (plen)
					mp->m_next = nbuf->m_pack;
				else
					mh->m_next = nbuf->m_pack;
			} else {
				/* Singlet, prepare to send */
				sendmp = mh;
				if ((adapter->num_vlans) &&
				    (staterr & IXGBE_RXD_STAT_VP)) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
		} else {
			/*
			** Either no header split, or a
			** secondary piece of a fragmented
			** packet.
			*/
			mp->m_len = plen;
			/*
			** See if there is a stored head
			** that determines what we are
			*/
			sendmp = rbuf->fmp;
			rbuf->m_pack = rbuf->fmp = NULL;

			if (sendmp != NULL) /* secondary frag */
				sendmp->m_pkthdr.len += mp->m_len;
			else {
				/* first desc of a non-ps chain */
				sendmp = mp;
				sendmp->m_flags |= M_PKTHDR;
				sendmp->m_pkthdr.len = mp->m_len;
				if (staterr & IXGBE_RXD_STAT_VP) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
			/* Pass the head pointer on */
			if (eop == 0) {
				nbuf->fmp = sendmp;
				sendmp = NULL;
				mp->m_next = nbuf->m_pack;
			}
		}
		++processed;
		/* Sending this frame? */
		if (eop) {
			sendmp->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;
			rxr->rx_packets++;
			/* capture data for AIM */
			rxr->bytes += sendmp->m_pkthdr.len;
			rxr->rx_bytes += sendmp->m_pkthdr.len;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				ixv_rx_checksum(staterr, sendmp, ptype);
#if __FreeBSD_version >= 800000
			sendmp->m_pkthdr.flowid = que->msix;
			sendmp->m_flags |= M_FLOWID;
#endif
		}
next_desc:
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;

		/* Now send to the stack or do LRO */
		if (sendmp != NULL)
			ixv_rx_input(rxr, ifp, sendmp, ptype);

		/* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			ixv_refresh_mbufs(rxr, i);
			processed = 0;
		}
	}

	/* Refresh any remaining buf structs */
	if (ixv_rx_unrefreshed(rxr))
		ixv_refresh_mbufs(rxr, i);

	rxr->next_to_check = i;

	/*
	 * Flush any outstanding LRO work
	 */
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	IXV_RX_UNLOCK(rxr);

	/*
	** We still have cleaning to do?
	** Schedule another interrupt if so.
	*/
	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
		ixv_rearm_queues(adapter, (u64)(1 << que->msix));
		return TRUE;
	}

	return FALSE;
}
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that the stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
{
	u16	status = (u16) staterr;
	u8	errors = (u8) (staterr >> 24);
	bool	sctp = FALSE;

	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = TRUE;

	if (status & IXGBE_RXD_STAT_IPCS) {
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		} else
			mp->m_pkthdr.csum_flags = 0;
	}
	if (status & IXGBE_RXD_STAT_L4CS) {
		u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
		if (sctp)
			type = CSUM_SCTP_VALID;
#endif
		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |= type;
			if (!sctp)
				mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
	return;
}
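/*
** Setting csum_data to 0xffff along with CSUM_DATA_VALID and
** CSUM_PSEUDO_HDR tells the stack the L4 checksum verified clean
** including the pseudo-header, so TCP/UDP input can skip its software
** checksum pass entirely; SCTP uses a CRC rather than a checksum,
** hence the separate CSUM_SCTP_VALID flag with no data word.
*/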
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
	}

	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan id's
		** based on the bits set in each
		** of the array ints.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXV_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
}
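/*
** The shadow VFTA is an array of 32-bit words indexed by bits 5-11 of
** the tag, with bits 0-4 selecting the bit within the word: for
** example vtag 100 lands in ixv_shadow_vfta[3] (100 >> 5), bit 4
** (100 & 0x1F). Masking the index with 0x7F keeps it within the
** 4096-entry VLAN table.
*/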
/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXV_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
}
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
static void
ixv_disable_intr(struct adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	return;
}
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
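/*
** Each 32-bit VTIVAR register holds four 8-bit vector slots covering
** two queue entries (RX in the low byte, TX in the next, per entry).
** For example entry 3 with type 1 (TX) selects VTIVAR(3 >> 1) ==
** VTIVAR(1) and bit offset 16 * (3 & 1) + 8 * 1 == 24, i.e. the top
** byte of that register.
*/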
static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the Link interrupt */
	ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
}
/*
** Tasklet handler for MSIX MBX interrupts
**  - do outside interrupt since it might sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}
/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc +=
		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc +=
		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc +=
		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc +=
		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc +=
		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
	}
}
static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}

#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}
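/*
** The VF counters are live 32-bit (or 36-bit) registers that wrap:
** if the current reading is below the previous one the register
** rolled over, so a full 2^32 (or 2^36) is credited to the software
** accumulator before the fresh low bits are merged back in. For
** example last == 0xFFFFFFF0 and current == 0x10 means 0x20 new
** events, not a negative count.
*/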
/*
** ixv_update_stats - Update the board statistics counters.
*/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
	    adapter->stats.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
	    adapter->stats.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
	    adapter->stats.vfmprc);
}
/**********************************************************************
 *
 *  This routine is called only when ixgbe_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_hw_stats(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	device_printf(dev,"Std Mbuf Failed = %lu\n",
	    adapter->mbuf_defrag_failed);
	device_printf(dev,"Driver dropped packets = %lu\n",
	    adapter->dropped_pkts);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    adapter->watchdog_events);

	device_printf(dev,"Good Packets Rcvd = %llu\n",
	    (long long)adapter->stats.vfgprc);
	device_printf(dev,"Good Packets Xmtd = %llu\n",
	    (long long)adapter->stats.vfgptc);
	device_printf(dev,"TSO Transmissions = %lu\n",
	    adapter->tso_tx);

	return;
}
/**********************************************************************
 *
 *  This routine is called only when ixgbe_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	struct rx_ring	*rxr;
	struct tx_ring	*txr;
	struct lro_ctrl	*lro;

	device_printf(dev,"Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
		lro = &rxr->lro;
		device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs);
		device_printf(dev,"RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets);
		device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
		    rxr->me, (long long)rxr->rx_split_packets);
		device_printf(dev,"RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes);
		device_printf(dev,"RX(%d) LRO Queued= %d\n",
		    rxr->me, lro->lro_queued);
		device_printf(dev,"RX(%d) LRO Flushed= %d\n",
		    rxr->me, lro->lro_flushed);
		device_printf(dev,"TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets);
		device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail);
	}

	device_printf(dev,"MBX IRQ Handled: %lu\n",
	    (long)adapter->mbx_irq);
	return;
}
static int
ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int		error, result;
	struct adapter	*adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixv_print_hw_stats(adapter);
	}
	return error;
}

static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	int		error, result;
	struct adapter	*adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixv_print_debug_info(adapter);
	}
	return error;
}
/*
** Set flow control using sysctl:
** Flow control values:
**	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
*/
static int
ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	int		error;
	struct adapter	*adapter;

	error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);

	if (error)
		return (error);

	adapter = (struct adapter *) arg1;
	switch (ixv_flow_control) {
		case ixgbe_fc_rx_pause:
		case ixgbe_fc_tx_pause:
		case ixgbe_fc_full:
			adapter->hw.fc.requested_mode = ixv_flow_control;
			break;
		case ixgbe_fc_none:
		default:
			adapter->hw.fc.requested_mode = ixgbe_fc_none;
	}

	ixgbe_fc_enable(&adapter->hw);
	return error;
}
static void
ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	*limit = value;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}