1 /******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_inet6.h"
42 /*********************************************************************
/* Driver version string, exported for the matching "ix" bus driver glue. */
44 *********************************************************************/
45 char ixv_driver_version[] = "1.1.2";
47 /*********************************************************************
50 * Used by probe to select devices to load on
51 * Last field stores an index into ixv_strings
52 * Last entry must be all 0s
54 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
55 *********************************************************************/
/* PCI ID match table for the two supported VF devices (82599 VF, X540 VF).
 * NOTE(review): the probe loop terminates on vendor_id == 0, so the
 * all-zero sentinel entry (not visible in this excerpt) is required. */
57 static ixv_vendor_info_t ixv_vendor_info_array[] =
59 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
61 /* required last entry */
65 /*********************************************************************
66 * Table of branding strings
67 *********************************************************************/
/* Indexed by the "String Index" field of ixv_vendor_info_array. */
69 static char *ixv_strings[] = {
70 "Intel(R) PRO/10GbE Virtual Function Network Driver"
73 /*********************************************************************
/* Forward declarations of all local (file-scope) functions. */
75 *********************************************************************/
/* FreeBSD device_t entry points */
76 static int ixv_probe(device_t);
77 static int ixv_attach(device_t);
78 static int ixv_detach(device_t);
79 static int ixv_shutdown(device_t);
/* TX entry points: legacy if_start on pre-8.0, multiqueue if_transmit after
 * (the #else/#endif of this conditional are not visible in this excerpt) */
80 #if __FreeBSD_version < 800000
81 static void ixv_start(struct ifnet *);
82 static void ixv_start_locked(struct tx_ring *, struct ifnet *);
84 static int ixv_mq_start(struct ifnet *, struct mbuf *);
85 static int ixv_mq_start_locked(struct ifnet *,
86 struct tx_ring *, struct mbuf *);
87 static void ixv_qflush(struct ifnet *);
89 static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
90 static void ixv_init(void *);
91 static void ixv_init_locked(struct adapter *);
92 static void ixv_stop(void *);
93 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
94 static int ixv_media_change(struct ifnet *);
95 static void ixv_identify_hardware(struct adapter *);
96 static int ixv_allocate_pci_resources(struct adapter *);
97 static int ixv_allocate_msix(struct adapter *);
98 static int ixv_allocate_queues(struct adapter *);
99 static int ixv_setup_msix(struct adapter *);
100 static void ixv_free_pci_resources(struct adapter *);
101 static void ixv_local_timer(void *);
102 static void ixv_setup_interface(device_t, struct adapter *);
103 static void ixv_config_link(struct adapter *);
/* Transmit ring setup/teardown */
105 static int ixv_allocate_transmit_buffers(struct tx_ring *);
106 static int ixv_setup_transmit_structures(struct adapter *);
107 static void ixv_setup_transmit_ring(struct tx_ring *);
108 static void ixv_initialize_transmit_units(struct adapter *);
109 static void ixv_free_transmit_structures(struct adapter *);
110 static void ixv_free_transmit_buffers(struct tx_ring *);
/* Receive ring setup/teardown */
112 static int ixv_allocate_receive_buffers(struct rx_ring *);
113 static int ixv_setup_receive_structures(struct adapter *);
114 static int ixv_setup_receive_ring(struct rx_ring *);
115 static void ixv_initialize_receive_units(struct adapter *);
116 static void ixv_free_receive_structures(struct adapter *);
117 static void ixv_free_receive_buffers(struct rx_ring *);
/* Interrupt control, datapath helpers, sysctls, DMA helpers */
119 static void ixv_enable_intr(struct adapter *);
120 static void ixv_disable_intr(struct adapter *);
121 static bool ixv_txeof(struct tx_ring *);
122 static bool ixv_rxeof(struct ix_queue *, int);
123 static void ixv_rx_checksum(u32, struct mbuf *, u32);
124 static void ixv_set_multi(struct adapter *);
125 static void ixv_update_link_status(struct adapter *);
126 static void ixv_refresh_mbufs(struct rx_ring *, int);
127 static int ixv_xmit(struct tx_ring *, struct mbuf **);
128 static int ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
129 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
130 static int ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
131 static int ixv_dma_malloc(struct adapter *, bus_size_t,
132 struct ixv_dma_alloc *, int);
133 static void ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
134 static void ixv_add_rx_process_limit(struct adapter *, const char *,
135 const char *, int *, int);
136 static bool ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
137 static bool ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
138 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
139 static void ixv_configure_ivars(struct adapter *);
140 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN hardware filter support */
142 static void ixv_setup_vlan_support(struct adapter *);
143 static void ixv_register_vlan(void *, struct ifnet *, u16);
144 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
/* VF statistics bookkeeping (counters survive resets via save/init) */
146 static void ixv_save_stats(struct adapter *);
147 static void ixv_init_stats(struct adapter *);
148 static void ixv_update_stats(struct adapter *);
150 static __inline void ixv_rx_discard(struct rx_ring *, int);
151 static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
154 /* The MSI/X Interrupt handlers */
155 static void ixv_msix_que(void *);
156 static void ixv_msix_mbx(void *);
158 /* Deferred interrupt tasklets */
159 static void ixv_handle_que(void *, int);
160 static void ixv_handle_mbx(void *, int);
162 /*********************************************************************
163 * FreeBSD Device Interface Entry Points
164 *********************************************************************/
/* newbus method table: probe/attach/detach/shutdown only (no suspend/resume). */
166 static device_method_t ixv_methods[] = {
167 /* Device interface */
168 DEVMETHOD(device_probe, ixv_probe),
169 DEVMETHOD(device_attach, ixv_attach),
170 DEVMETHOD(device_detach, ixv_detach),
171 DEVMETHOD(device_shutdown, ixv_shutdown),
/* Driver shares the "ix" name and the ixgbe devclass with the PF driver. */
175 static driver_t ixv_driver = {
176 "ix", ixv_methods, sizeof(struct adapter),
179 extern devclass_t ixgbe_devclass;
180 DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
181 MODULE_DEPEND(ixv, pci, 1, 1, 1);
182 MODULE_DEPEND(ixv, ether, 1, 1, 1);
185 ** TUNEABLE PARAMETERS:
189 ** AIM: Adaptive Interrupt Moderation
190 ** which means that the interrupt rate
191 ** is varied over time based on the
192 ** traffic for that interrupt vector
194 static int ixv_enable_aim = FALSE;
195 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
197 /* How many packets rxeof tries to clean at a time */
198 static int ixv_rx_process_limit = 128;
199 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
201 /* Flow control setting, default to full */
202 static int ixv_flow_control = ixgbe_fc_full;
203 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
/* Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, it's off by default. */
211 static int ixv_header_split = FALSE;
212 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
215 ** Number of TX descriptors per ring,
216 ** setting higher than RX as this seems
217 ** the better performing choice.
219 static int ixv_txd = DEFAULT_TXD;
220 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
222 /* Number of RX descriptors per ring */
223 static int ixv_rxd = DEFAULT_RXD;
224 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
227 ** Shadow VFTA table, this is needed because
228 ** the real filter table gets cleared during
229 ** a soft reset and we need to repopulate it.
/* NOTE(review): this shadow table is a single file-scope global, not
 * per-adapter state — multiple VF instances would share (and clobber)
 * one table. Verify whether multi-instance operation is intended. */
231 static u32 ixv_shadow_vfta[VFTA_SIZE];
233 /*********************************************************************
234 * Device identification routine
236 * ixv_probe determines if the driver should be loaded on
237 * adapter based on PCI vendor/device id of the adapter.
239 * return BUS_PROBE_DEFAULT on success, positive on failure
240 *********************************************************************/
243 ixv_probe(device_t dev)
245 ixv_vendor_info_t *ent;
247 u16 pci_vendor_id = 0;
248 u16 pci_device_id = 0;
249 u16 pci_subvendor_id = 0;
250 u16 pci_subdevice_id = 0;
251 char adapter_name[256];
/* Fast reject: only Intel devices can match this table. */
254 pci_vendor_id = pci_get_vendor(dev);
255 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
258 pci_device_id = pci_get_device(dev);
259 pci_subvendor_id = pci_get_subvendor(dev);
260 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the match table; a subvendor/subdevice of 0 acts as a wildcard. */
262 ent = ixv_vendor_info_array;
263 while (ent->vendor_id != 0) {
264 if ((pci_vendor_id == ent->vendor_id) &&
265 (pci_device_id == ent->device_id) &&
267 ((pci_subvendor_id == ent->subvendor_id) ||
268 (ent->subvendor_id == 0)) &&
270 ((pci_subdevice_id == ent->subdevice_id) ||
271 (ent->subdevice_id == 0))) {
/* NOTE(review): unbounded sprintf into a 256-byte stack buffer;
 * inputs are driver-controlled strings today, but snprintf(adapter_name,
 * sizeof(adapter_name), ...) would be safer against future edits. */
272 sprintf(adapter_name, "%s, Version - %s",
273 ixv_strings[ent->index],
275 device_set_desc_copy(dev, adapter_name);
276 return (BUS_PROBE_DEFAULT);
283 /*********************************************************************
284 * Device initialization routine
286 * The attach entry point is called when the driver is being loaded.
287 * This routine identifies the type of hardware, allocates all resources
288 * and initializes the hardware.
290 * return 0 on success, positive on failure
291 *********************************************************************/
294 ixv_attach(device_t dev)
296 struct adapter *adapter;
300 INIT_DEBUGOUT("ixv_attach: begin");
/* Honor a "disabled" device hint; shares the "ixgbe" hint namespace. */
302 if (resource_disabled("ixgbe", device_get_unit(dev))) {
303 device_printf(dev, "Disabled by device hint\n");
307 /* Allocate, clear, and link in our adapter structure */
308 adapter = device_get_softc(dev);
309 adapter->dev = adapter->osdep.dev = dev;
313 IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Per-device sysctl nodes: stats dump, debug dump, flow control, AIM. */
316 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
317 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
318 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
319 adapter, 0, ixv_sysctl_stats, "I", "Statistics");
321 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
322 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
323 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
324 adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
326 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
327 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
328 OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
329 adapter, 0, ixv_set_flowcntl, "I", "Flow Control");
/* NOTE(review): SYSCTL_ADD_INT takes CTLFLAG_* only; OR-ing in
 * CTLTYPE_INT here is redundant at best — confirm against sysctl(9). */
331 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
332 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
333 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
334 &ixv_enable_aim, 1, "Interrupt Moderation");
336 /* Set up the timer callout */
337 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
339 /* Determine hardware revision */
340 ixv_identify_hardware(adapter);
342 /* Do base PCI setup - map BAR0 */
343 if (ixv_allocate_pci_resources(adapter)) {
344 device_printf(dev, "Allocation of PCI resources failed\n");
349 /* Do descriptor calc and sanity checks */
350 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
351 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
352 device_printf(dev, "TXD config issue, using default!\n");
353 adapter->num_tx_desc = DEFAULT_TXD;
355 adapter->num_tx_desc = ixv_txd;
/* BUG(review): copy-paste from the TXD check above — the RXD bounds
 * test uses MIN_TXD/MAX_TXD; it should validate against MIN_RXD/MAX_RXD. */
357 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
358 ixv_rxd < MIN_TXD || ixv_rxd > MAX_TXD) {
359 device_printf(dev, "RXD config issue, using default!\n");
360 adapter->num_rx_desc = DEFAULT_RXD;
362 adapter->num_rx_desc = ixv_rxd;
364 /* Allocate our TX/RX Queues */
365 if (ixv_allocate_queues(adapter)) {
371 ** Initialize the shared code: its
372 ** at this point the mac type is set.
374 error = ixgbe_init_shared_code(hw);
376 device_printf(dev,"Shared Code Initialization Failure\n");
381 /* Setup the mailbox */
382 ixgbe_init_mbx_params_vf(hw);
386 /* Get Hardware Flow Control setting */
387 hw->fc.requested_mode = ixgbe_fc_full;
388 hw->fc.pause_time = IXV_FC_PAUSE;
389 hw->fc.low_water = IXV_FC_LO;
390 hw->fc.high_water[0] = IXV_FC_HI;
391 hw->fc.send_xon = TRUE;
393 error = ixgbe_init_hw(hw);
395 device_printf(dev,"Hardware Initialization Failure\n");
400 error = ixv_allocate_msix(adapter);
404 /* Setup OS specific network interface */
405 ixv_setup_interface(dev, adapter);
407 /* Sysctl for limiting the amount of work done in the taskqueue */
408 ixv_add_rx_process_limit(adapter, "rx_processing_limit",
409 "max number of rx packets to process", &adapter->rx_process_limit,
410 ixv_rx_process_limit);
412 /* Do the stats setup */
413 ixv_save_stats(adapter);
414 ixv_init_stats(adapter);
416 /* Register for VLAN events */
417 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
418 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
419 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
420 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
422 INIT_DEBUGOUT("ixv_attach: end");
/* Error unwind path: release rings and PCI resources on failure. */
426 ixv_free_transmit_structures(adapter);
427 ixv_free_receive_structures(adapter);
429 ixv_free_pci_resources(adapter);
434 /*********************************************************************
435 * Device removal routine
437 * The detach entry point is called when the driver is being removed.
438 * This routine stops the adapter and deallocates all the resources
439 * that were allocated for driver operation.
441 * return 0 on success, positive on failure
442 *********************************************************************/
445 ixv_detach(device_t dev)
447 struct adapter *adapter = device_get_softc(dev);
448 struct ix_queue *que = adapter->queues;
450 INIT_DEBUGOUT("ixv_detach: begin");
452 /* Make sure VLANS are not using driver */
453 if (adapter->ifp->if_vlantrunk != NULL) {
454 device_printf(dev,"Vlan in use, detach first\n");
/* Stop the adapter under the core lock before tearing anything down. */
458 IXV_CORE_LOCK(adapter);
460 IXV_CORE_UNLOCK(adapter);
/* Drain and free the per-queue taskqueues. */
462 for (int i = 0; i < adapter->num_queues; i++, que++) {
464 taskqueue_drain(que->tq, &que->que_task);
465 taskqueue_free(que->tq);
469 /* Drain the Link queue */
471 taskqueue_drain(adapter->tq, &adapter->mbx_task);
472 taskqueue_free(adapter->tq);
475 /* Unregister VLAN events */
476 if (adapter->vlan_attach != NULL)
477 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
478 if (adapter->vlan_detach != NULL)
479 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Detach from the network stack, then release hardware resources. */
481 ether_ifdetach(adapter->ifp);
482 callout_drain(&adapter->timer);
483 ixv_free_pci_resources(adapter);
484 bus_generic_detach(dev);
485 if_free(adapter->ifp);
487 ixv_free_transmit_structures(adapter);
488 ixv_free_receive_structures(adapter);
490 IXV_CORE_LOCK_DESTROY(adapter);
494 /*********************************************************************
496 * Shutdown entry point
498 **********************************************************************/
500 ixv_shutdown(device_t dev)
502 struct adapter *adapter = device_get_softc(dev);
/* Stop the adapter under the core lock (the stop call between these
 * lock/unlock lines is not visible in this excerpt). */
503 IXV_CORE_LOCK(adapter);
505 IXV_CORE_UNLOCK(adapter);
509 #if __FreeBSD_version < 800000
510 /*********************************************************************
511 * Transmit entry point
513 * ixv_start is called by the stack to initiate a transmit.
514 * The driver will remain in this routine as long as there are
515 * packets to transmit and transmit resources are available.
516 * In case resources are not available stack is notified and
517 * the packet is requeued.
518 **********************************************************************/
520 ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
523 struct adapter *adapter = txr->adapter;
/* Caller must hold the TX ring lock. */
525 IXV_TX_LOCK_ASSERT(txr);
527 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
530 if (!adapter->link_active)
/* Dequeue-and-transmit loop; on ring-full, requeue and mark OACTIVE. */
533 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
535 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
539 if (ixv_xmit(txr, &m_head)) {
542 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
543 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
546 /* Send a copy of the frame to the BPF listener */
547 ETHER_BPF_MTAP(ifp, m_head);
549 /* Set watchdog on */
550 txr->watchdog_check = TRUE;
551 txr->watchdog_time = ticks;
558 * Legacy TX start - called by the stack, this
559 * always uses the first tx ring, and should
560 * not be used with multiqueue tx enabled.
563 ixv_start(struct ifnet *ifp)
565 struct adapter *adapter = ifp->if_softc;
566 struct tx_ring *txr = adapter->tx_rings;
/* Take the ring lock and hand off to the locked worker (the lock/unlock
 * lines around this call are not visible in this excerpt). */
568 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
570 ixv_start_locked(txr, ifp);
579 ** Multiqueue Transmit driver
583 ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
585 struct adapter *adapter = ifp->if_softc;
586 struct ix_queue *que;
/* Select a TX queue from the mbuf's flow id when present; the fallback
 * selection (else branch) is not visible in this excerpt. */
590 /* Which queue to use */
591 if ((m->m_flags & M_FLOWID) != 0)
592 i = m->m_pkthdr.flowid % adapter->num_queues;
594 txr = &adapter->tx_rings[i];
595 que = &adapter->queues[i];
/* If the ring lock is contested, enqueue to the buf_ring and let the
 * queue task drain it instead of blocking here. */
597 if (IXV_TX_TRYLOCK(txr)) {
598 err = ixv_mq_start_locked(ifp, txr, m);
601 err = drbr_enqueue(ifp, txr->br, m);
602 taskqueue_enqueue(que->tq, &que->que_task);
/* Locked multiqueue transmit worker: drains the ring's buf_ring, mapping
 * each packet with ixv_xmit(). Caller holds the TX ring lock. */
609 ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
611 struct adapter *adapter = txr->adapter;
613 int enqueued, err = 0;
/* If not running or link is down, just park the mbuf on the buf_ring. */
615 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
616 IFF_DRV_RUNNING || adapter->link_active == 0) {
618 err = drbr_enqueue(ifp, txr->br, m);
622 /* Do a clean if descriptors are low */
623 if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
628 next = drbr_dequeue(ifp, txr->br);
629 } else if (drbr_needs_enqueue(ifp, txr->br)) {
630 if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
632 next = drbr_dequeue(ifp, txr->br);
636 /* Process the queue */
637 while (next != NULL) {
638 if ((err = ixv_xmit(txr, &next)) != 0) {
640 err = drbr_enqueue(ifp, txr->br, next);
644 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
645 /* Send a copy of the frame to the BPF listener */
646 ETHER_BPF_MTAP(ifp, next);
647 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
/* Ring nearly full: set OACTIVE so the stack stops handing us work. */
649 if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
650 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
653 next = drbr_dequeue(ifp, txr->br);
657 /* Set watchdog on */
658 txr->watchdog_check = TRUE;
659 txr->watchdog_time = ticks;
666 ** Flush all ring buffers
669 ixv_qflush(struct ifnet *ifp)
671 struct adapter *adapter = ifp->if_softc;
672 struct tx_ring *txr = adapter->tx_rings;
/* Drain every TX ring's buf_ring; the m_freem() of each dequeued mbuf
 * is not visible in this excerpt. */
675 for (int i = 0; i < adapter->num_queues; i++, txr++) {
677 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
686 /*********************************************************************
689 * ixv_ioctl is called when the user wants to configure the
692 * return 0 on success, positive on failure
693 **********************************************************************/
696 ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
698 struct adapter *adapter = ifp->if_softc;
699 struct ifreq *ifr = (struct ifreq *) data;
700 #if defined(INET) || defined(INET6)
701 struct ifaddr *ifa = (struct ifaddr *) data;
702 bool avoid_reset = FALSE;
/* SIOCSIFADDR handling: avoid a full reinit (link renegotiation) when
 * only an INET/INET6 address is being set. */
710 if (ifa->ifa_addr->sa_family == AF_INET)
714 if (ifa->ifa_addr->sa_family == AF_INET6)
717 #if defined(INET) || defined(INET6)
719 ** Calling init results in link renegotiation,
720 ** so we avoid doing it when possible.
723 ifp->if_flags |= IFF_UP;
724 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
726 if (!(ifp->if_flags & IFF_NOARP))
727 arp_ifinit(ifp, ifa);
729 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: bound the MTU by the max frame size, then reinit. */
733 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
734 if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
737 IXV_CORE_LOCK(adapter);
738 ifp->if_mtu = ifr->ifr_mtu;
739 adapter->max_frame_size =
740 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
741 ixv_init_locked(adapter);
742 IXV_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: start or stop the interface to match IFF_UP. */
746 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
747 IXV_CORE_LOCK(adapter);
748 if (ifp->if_flags & IFF_UP) {
749 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
750 ixv_init_locked(adapter);
752 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
754 adapter->if_flags = ifp->if_flags;
755 IXV_CORE_UNLOCK(adapter);
/* SIOCADDMULTI/SIOCDELMULTI: reprogram the multicast filter with
 * interrupts masked. */
759 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
760 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
761 IXV_CORE_LOCK(adapter);
762 ixv_disable_intr(adapter);
763 ixv_set_multi(adapter);
764 ixv_enable_intr(adapter);
765 IXV_CORE_UNLOCK(adapter);
770 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
771 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle offload capabilities, reinit if running. */
775 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
776 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
777 if (mask & IFCAP_HWCSUM)
778 ifp->if_capenable ^= IFCAP_HWCSUM;
779 if (mask & IFCAP_TSO4)
780 ifp->if_capenable ^= IFCAP_TSO4;
781 if (mask & IFCAP_LRO)
782 ifp->if_capenable ^= IFCAP_LRO;
783 if (mask & IFCAP_VLAN_HWTAGGING)
784 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
785 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
786 IXV_CORE_LOCK(adapter);
787 ixv_init_locked(adapter);
788 IXV_CORE_UNLOCK(adapter);
790 VLAN_CAPABILITIES(ifp);
/* Anything else falls through to the generic Ethernet ioctl handler. */
795 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
796 error = ether_ioctl(ifp, command, data);
803 /*********************************************************************
806 * This routine is used in two ways. It is used by the stack as
807 * init entry point in network interface structure. It is also used
808 * by the driver as a hw/sw initialization routine to get to a
811 * return 0 on success, positive on failure
812 **********************************************************************/
813 #define IXGBE_MHADD_MFS_SHIFT 16
816 ixv_init_locked(struct adapter *adapter)
818 struct ifnet *ifp = adapter->ifp;
819 device_t dev = adapter->dev;
820 struct ixgbe_hw *hw = &adapter->hw;
823 INIT_DEBUGOUT("ixv_init: begin");
/* Core lock must be held by the caller. */
824 mtx_assert(&adapter->core_mtx, MA_OWNED);
825 hw->adapter_stopped = FALSE;
826 ixgbe_stop_adapter(hw);
827 callout_stop(&adapter->timer);
829 /* reprogram the RAR[0] in case user changed it. */
830 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
832 /* Get the latest mac address, User can use a LAA */
833 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
834 IXGBE_ETH_LENGTH_OF_ADDRESS);
835 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
836 hw->addr_ctrl.rar_used_count = 1;
838 /* Prepare transmit descriptors and buffers */
839 if (ixv_setup_transmit_structures(adapter)) {
840 device_printf(dev,"Could not setup transmit structures\n");
846 ixv_initialize_transmit_units(adapter);
848 /* Setup Multicast table */
849 ixv_set_multi(adapter);
852 ** Determine the correct mbuf pool
853 ** for doing jumbo/headersplit
855 if (ifp->if_mtu > ETHERMTU)
856 adapter->rx_mbuf_sz = MJUMPAGESIZE;
858 adapter->rx_mbuf_sz = MCLBYTES;
860 /* Prepare receive descriptors and buffers */
861 if (ixv_setup_receive_structures(adapter)) {
862 device_printf(dev,"Could not setup receive structures\n");
867 /* Configure RX settings */
868 ixv_initialize_receive_units(adapter);
870 /* Enable Enhanced MSIX mode */
871 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
872 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
873 gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
874 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
876 /* Set the various hardware offload abilities */
877 ifp->if_hwassist = 0;
878 if (ifp->if_capenable & IFCAP_TSO4)
879 ifp->if_hwassist |= CSUM_TSO;
880 if (ifp->if_capenable & IFCAP_TXCSUM) {
881 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
882 #if __FreeBSD_version >= 800000
883 ifp->if_hwassist |= CSUM_SCTP;
/* Jumbo frames: raise the hardware max frame size (MHADD.MFS). */
888 if (ifp->if_mtu > ETHERMTU) {
889 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
890 mhadd &= ~IXGBE_MHADD_MFS_MASK;
891 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
892 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
895 /* Set up VLAN offload and filter */
896 ixv_setup_vlan_support(adapter);
/* Restart the watchdog/stats timer. */
898 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
900 /* Set up MSI/X routing */
901 ixv_configure_ivars(adapter);
903 /* Set up auto-mask */
904 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
906 /* Set moderation on the Link interrupt */
907 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
910 ixv_init_stats(adapter);
912 /* Config/Enable Link */
913 ixv_config_link(adapter);
915 /* And now turn on interrupts */
916 ixv_enable_intr(adapter);
918 /* Now inform the stack we're ready */
919 ifp->if_drv_flags |= IFF_DRV_RUNNING;
920 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Body of ixv_init(void *arg): if_init entry point — takes the core lock
 * and delegates to ixv_init_locked(). The signature line is not visible
 * in this excerpt. */
928 struct adapter *adapter = arg;
930 IXV_CORE_LOCK(adapter);
931 ixv_init_locked(adapter);
932 IXV_CORE_UNLOCK(adapter);
939 ** MSIX Interrupt Handlers and Tasklets
/* Unmask the MSI-X interrupt for one queue vector via VTEIMS. */
944 ixv_enable_queue(struct adapter *adapter, u32 vector)
946 struct ixgbe_hw *hw = &adapter->hw;
/* NOTE(review): `1 << vector` is a 32-bit signed shift — UB for
 * vector >= 31. VF vectors are small in practice, but (u32)1 << vector
 * would be safer and consistent with ixv_disable_queue(). */
947 u32 queue = 1 << vector;
950 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
951 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Mask the MSI-X interrupt for one queue vector via VTEIMC. */
955 ixv_disable_queue(struct adapter *adapter, u32 vector)
957 struct ixgbe_hw *hw = &adapter->hw;
/* NOTE(review): the shift happens in 32-bit int before the u64 cast, so
 * bits 32+ can never be set and vector >= 31 is UB; ((u64)1 << vector)
 * was probably intended. */
958 u64 queue = (u64)(1 << vector);
961 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
962 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
/* Trigger a software interrupt (VTEICS) for the given queue bitmap.
 * NOTE(review): the u64 bitmap is truncated through a u32 mask — only
 * the low 32 queue bits can be rearmed here. */
966 ixv_rearm_queues(struct adapter *adapter, u64 queues)
968 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
969 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
/* Deferred (taskqueue) per-queue service routine: cleans RX, restarts TX,
 * and either requeues itself (more work) or re-enables the interrupt. */
974 ixv_handle_que(void *context, int pending)
976 struct ix_queue *que = context;
977 struct adapter *adapter = que->adapter;
978 struct tx_ring *txr = que->txr;
979 struct ifnet *ifp = adapter->ifp;
982 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
983 more = ixv_rxeof(que, adapter->rx_process_limit);
/* Kick TX: multiqueue path on 8.0+, legacy if_snd path otherwise. */
986 #if __FreeBSD_version >= 800000
987 if (!drbr_empty(ifp, txr->br))
988 ixv_mq_start_locked(ifp, txr, NULL);
990 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
991 ixv_start_locked(txr, ifp);
/* More RX work pending: reschedule ourselves instead of unmasking. */
995 taskqueue_enqueue(que->tq, &que->que_task);
1000 /* Reenable this interrupt */
1001 ixv_enable_queue(adapter, que->msix);
1005 /*********************************************************************
1007 * MSI Queue Interrupt Service routine
1009 **********************************************************************/
1011 ixv_msix_que(void *arg)
1013 struct ix_queue *que = arg;
1014 struct adapter *adapter = que->adapter;
1015 struct tx_ring *txr = que->txr;
1016 struct rx_ring *rxr = que->rxr;
1017 bool more_tx, more_rx;
/* Mask this vector while we service it; re-enabled at the bottom. */
1020 ixv_disable_queue(adapter, que->msix);
1023 more_rx = ixv_rxeof(que, adapter->rx_process_limit);
1026 more_tx = ixv_txeof(txr);
1028 ** Make certain that if the stack
1029 ** has anything queued the task gets
1030 ** scheduled to handle it.
1032 #if __FreeBSD_version < 800000
1033 if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
1035 if (!drbr_empty(adapter->ifp, txr->br))
1040 more_rx = ixv_rxeof(que, adapter->rx_process_limit);
/* AIM disabled: skip the moderation recalculation entirely. */
1044 if (ixv_enable_aim == FALSE)
1047 ** Do Adaptive Interrupt Moderation:
1048 ** - Write out last calculated setting
1049 ** - Calculate based on average size over
1050 ** the last interval.
1052 if (que->eitr_setting)
1053 IXGBE_WRITE_REG(&adapter->hw,
1054 IXGBE_VTEITR(que->msix),
1057 que->eitr_setting = 0;
1059 /* Idle, do nothing */
1060 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* newitr = average bytes/packet over the interval, clamped and biased. */
1063 if ((txr->bytes) && (txr->packets))
1064 newitr = txr->bytes/txr->packets;
1065 if ((rxr->bytes) && (rxr->packets))
1066 newitr = max(newitr,
1067 (rxr->bytes / rxr->packets));
1068 newitr += 24; /* account for hardware frame, crc */
1070 /* set an upper boundary */
1071 newitr = min(newitr, 3000);
1073 /* Be nice to the mid range */
1074 if ((newitr > 300) && (newitr < 1200))
1075 newitr = (newitr / 3);
1077 newitr = (newitr / 2);
/* EITR register takes the interval in both halves of the word. */
1079 newitr |= newitr << 16;
1081 /* save for next interrupt */
1082 que->eitr_setting = newitr;
1091 if (more_tx || more_rx)
1092 taskqueue_enqueue(que->tq, &que->que_task);
1093 else /* Reenable this interrupt */
1094 ixv_enable_queue(adapter, que->msix);
/* MSI-X mailbox/link vector handler: reads and clears the cause, defers
 * link-status work to the mailbox task, then re-unmasks the OTHER vector. */
1099 ixv_msix_mbx(void *arg)
1101 struct adapter *adapter = arg;
1102 struct ixgbe_hw *hw = &adapter->hw;
1107 /* First get the cause */
1108 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
1109 /* Clear interrupt with write */
1110 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
1112 /* Link status change */
1113 if (reg & IXGBE_EICR_LSC)
1114 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1116 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
1120 /*********************************************************************
1122 * Media Ioctl callback
1124 * This routine is called whenever the user queries the status of
1125 * the interface using ifconfig.
1127 **********************************************************************/
1129 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1131 struct adapter *adapter = ifp->if_softc;
1133 INIT_DEBUGOUT("ixv_media_status: begin");
/* Refresh link state under the core lock before reporting. */
1134 IXV_CORE_LOCK(adapter);
1135 ixv_update_link_status(adapter);
1137 ifmr->ifm_status = IFM_AVALID;
1138 ifmr->ifm_active = IFM_ETHER;
1140 if (!adapter->link_active) {
1141 IXV_CORE_UNLOCK(adapter);
1145 ifmr->ifm_status |= IFM_ACTIVE;
1147 switch (adapter->link_speed) {
1148 case IXGBE_LINK_SPEED_1GB_FULL:
1149 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1151 case IXGBE_LINK_SPEED_10GB_FULL:
1152 ifmr->ifm_active |= IFM_FDX;
1156 IXV_CORE_UNLOCK(adapter);
1161 /*********************************************************************
1163 * Media Ioctl callback
1165 * This routine is called when the user changes speed/duplex using
/* (typo fixed: "mediopt" -> "mediaopt") */
1166 * media/mediaopt option with ifconfig.
1168 **********************************************************************/
1170 ixv_media_change(struct ifnet * ifp)
1172 struct adapter *adapter = ifp->if_softc;
1173 struct ifmedia *ifm = &adapter->media;
1175 INIT_DEBUGOUT("ixv_media_change: begin");
1177 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* A VF cannot negotiate speed — only IFM_AUTO is accepted. */
1180 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1184 device_printf(adapter->dev, "Only auto media type\n");
1191 /*********************************************************************
1193 * This routine maps the mbufs to tx descriptors, allowing the
1194 * TX engine to transmit the packets.
1195 * - return 0 on success, positive on failure
1197 **********************************************************************/
/*
 * Encapsulate one mbuf chain onto the TX ring: DMA-map the chain,
 * fill one advanced TX descriptor per segment, and bump the tail
 * register so hardware starts transmitting.
 * Returns 0 on success, positive errno on failure (per the header
 * comment above); *m_headp may be replaced if m_defrag() runs.
 * NOTE(review): fragmentary excerpt -- braces, returns, and several
 * declarations (m_head/map/paylen assignments) are not visible.
 */
1200 ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1202 struct adapter *adapter = txr->adapter;
1203 u32 olinfo_status = 0, cmd_type_len;
1205 int i, j, error, nsegs;
1206 int first, last = 0;
1207 struct mbuf *m_head;
1208 bus_dma_segment_t segs[32];
1210 struct ixv_tx_buf *txbuf, *txbuf_mapped;
1211 union ixgbe_adv_tx_desc *txd = NULL;
1215 /* Basic descriptor defines */
1216 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1217 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
/* VLAN-tagged frames need the VLE bit so hardware inserts the tag. */
1219 if (m_head->m_flags & M_VLANTAG)
1220 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1223 * Important to capture the first descriptor
1224 * used because it will contain the index of
1225 * the one we tell the hardware to report back
1227 first = txr->next_avail_desc;
1228 txbuf = &txr->tx_buffers[first];
1229 txbuf_mapped = txbuf;
1233 * Map the packet for DMA.
1235 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1236 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
/*
 * EFBIG means the chain has more segments than the tag allows:
 * defragment into a single cluster and retry the load once.
 */
1238 if (error == EFBIG) {
1241 m = m_defrag(*m_headp, M_DONTWAIT);
1243 adapter->mbuf_defrag_failed++;
1251 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1252 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1254 if (error == ENOMEM) {
1255 adapter->no_tx_dma_setup++;
1257 } else if (error != 0) {
1258 adapter->no_tx_dma_setup++;
1263 } else if (error == ENOMEM) {
1264 adapter->no_tx_dma_setup++;
1266 } else if (error != 0) {
1267 adapter->no_tx_dma_setup++;
1273 /* Make certain there are enough descriptors */
1274 if (nsegs > txr->tx_avail - 2) {
1275 txr->no_desc_avail++;
1282 ** Set up the appropriate offload context
1283 ** this becomes the first descriptor of
/* TSO path: context descriptor plus TSE/checksum bits and payload len. */
1286 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1287 if (ixv_tso_setup(txr, m_head, &paylen)) {
1288 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1289 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1290 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1291 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1295 } else if (ixv_tx_ctx_setup(txr, m_head))
1296 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1298 /* Record payload length */
1300 olinfo_status |= m_head->m_pkthdr.len <<
1301 IXGBE_ADVTXD_PAYLEN_SHIFT;
/* One read-format descriptor per DMA segment, wrapping at ring end. */
1303 i = txr->next_avail_desc;
1304 for (j = 0; j < nsegs; j++) {
1308 txbuf = &txr->tx_buffers[i];
1309 txd = &txr->tx_base[i];
1310 seglen = segs[j].ds_len;
1311 segaddr = htole64(segs[j].ds_addr);
1313 txd->read.buffer_addr = segaddr;
1314 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1315 cmd_type_len |seglen);
1316 txd->read.olinfo_status = htole32(olinfo_status);
1317 last = i; /* descriptor that will get completion IRQ */
1319 if (++i == adapter->num_tx_desc)
1322 txbuf->m_head = NULL;
1323 txbuf->eop_index = -1;
/* Last descriptor gets EOP (end of packet) + RS (report status). */
1326 txd->read.cmd_type_len |=
1327 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1328 txr->tx_avail -= nsegs;
1329 txr->next_avail_desc = i;
1331 txbuf->m_head = m_head;
1332 txr->tx_buffers[first].map = txbuf->map;
1334 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1336 /* Set the index of the descriptor that will be marked done */
1337 txbuf = &txr->tx_buffers[first];
1338 txbuf->eop_index = last;
1340 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1341 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1343 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1344 * hardware that this frame is available to transmit.
1346 ++txr->total_packets;
1347 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
/* Failure path: undo the DMA mapping before returning. */
1352 bus_dmamap_unload(txr->txtag, txbuf->map);
1358 /*********************************************************************
1361 * This routine is called whenever multicast address list is updated.
1363 **********************************************************************/
1364 #define IXGBE_RAR_ENTRIES 16
/*
 * Rebuild the multicast filter: walk the interface's multicast list,
 * flatten the link-layer addresses into the mta[] byte array, and
 * hand the array to the shared code via the ixv_mc_array_itr iterator.
 * NOTE(review): fragmentary excerpt -- mcnt declaration/increment,
 * "continue" for non-AF_LINK entries, and braces are not visible.
 */
1367 ixv_set_multi(struct adapter *adapter)
1369 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1371 struct ifmultiaddr *ifma;
1373 struct ifnet *ifp = adapter->ifp;
1375 IOCTL_DEBUGOUT("ixv_set_multi: begin");
/* Pre-8.0 used IF_ADDR_LOCK; 8.0+ uses if_maddr_rlock (see unlock below). */
1377 #if __FreeBSD_version < 800000
1380 if_maddr_rlock(ifp);
1382 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
/* Only link-layer (Ethernet) multicast entries are relevant. */
1383 if (ifma->ifma_addr->sa_family != AF_LINK)
1385 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1386 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1387 IXGBE_ETH_LENGTH_OF_ADDRESS);
1390 #if __FreeBSD_version < 800000
1391 IF_ADDR_UNLOCK(ifp);
1393 if_maddr_runlock(ifp);
/* Push the flattened list to the shared code / mailbox. */
1398 ixgbe_update_mc_addr_list(&adapter->hw,
1399 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1405 * This is an iterator function now needed by the multicast
1406 * shared code. It simply feeds the shared code routine the
1407 * addresses in the array of ixv_set_multi() one by one.
/*
 * Iterator callback for ixgbe_update_mc_addr_list(): returns the
 * current address and advances *update_ptr by one Ethernet address.
 * NOTE(review): fragmentary excerpt -- the return statement and the
 * *vmdq handling are not visible here.
 */
1410 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1412 u8 *addr = *update_ptr;
1416 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1417 *update_ptr = newptr;
1421 /*********************************************************************
1424 * This routine checks for link status, updates statistics,
1425 * and runs the watchdog check.
1427 **********************************************************************/
/*
 * Once-per-second callout (rearmed below with hz): refreshes link
 * state and statistics, then runs the per-queue TX watchdog.  On a
 * watchdog timeout it logs diagnostics and reinitializes the device.
 * Caller/callout runs with the core mutex held (asserted below).
 * NOTE(review): fragmentary excerpt -- the loop braces, goto to the
 * watchdog label, and early returns are not visible.
 */
1430 ixv_local_timer(void *arg)
1432 struct adapter *adapter = arg;
1433 device_t dev = adapter->dev;
1434 struct tx_ring *txr = adapter->tx_rings;
1437 mtx_assert(&adapter->core_mtx, MA_OWNED);
1439 ixv_update_link_status(adapter);
1442 ixv_update_stats(adapter);
1445 * If the interface has been paused
1446 * then don't do the watchdog check
1448 if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
1451 ** Check for time since any descriptor was cleaned
1453 for (i = 0; i < adapter->num_queues; i++, txr++) {
/* Queues with no pending work are skipped (watchdog disarmed). */
1455 if (txr->watchdog_check == FALSE) {
1459 if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
1464 ixv_rearm_queues(adapter, adapter->que_mask);
/* Re-arm ourselves for one second from now. */
1465 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
/* Watchdog-fired path: dump ring state, mark ifp down, reinit. */
1469 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1470 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
1471 IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
1472 IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
1473 device_printf(dev,"TX(%d) desc avail = %d,"
1474 "Next TX to Clean = %d\n",
1475 txr->me, txr->tx_avail, txr->next_to_clean);
1476 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1477 adapter->watchdog_events++;
1479 ixv_init_locked(adapter);
1483 ** Note: this routine updates the OS on the link state
1484 ** the real check of the hardware only happens with
1485 ** a link interrupt.
/*
 * Propagate adapter->link_up/link_speed (set elsewhere, e.g. by the
 * link interrupt -- see the note above) to the OS via
 * if_link_state_change(), logging transitions exactly once.
 */
1488 ixv_update_link_status(struct adapter *adapter)
1490 struct ifnet *ifp = adapter->ifp;
1491 struct tx_ring *txr = adapter->tx_rings;
1492 device_t dev = adapter->dev;
1495 if (adapter->link_up){
1496 if (adapter->link_active == FALSE) {
/* link_speed value 128 is the 10G encoding; anything else prints 1G. */
1498 device_printf(dev,"Link is up %d Gbps %s \n",
1499 ((adapter->link_speed == 128)? 10:1),
1501 adapter->link_active = TRUE;
1502 if_link_state_change(ifp, LINK_STATE_UP);
1504 } else { /* Link down */
1505 if (adapter->link_active == TRUE) {
1507 device_printf(dev,"Link is Down\n");
1508 if_link_state_change(ifp, LINK_STATE_DOWN);
1509 adapter->link_active = FALSE;
/* Disarm every queue's TX watchdog while the link is down. */
1510 for (int i = 0; i < adapter->num_queues;
1512 txr->watchdog_check = FALSE;
1520 /*********************************************************************
1522 * This routine disables all traffic on the adapter by issuing a
1523 * global reset on the MAC and deallocates TX/RX buffers.
1525 **********************************************************************/
/*
 * ixv_stop body (the signature line is not visible in this excerpt):
 * quiesces the adapter -- disables interrupts, clears the RUNNING
 * flags, resets the MAC via the shared code, stops the timer, and
 * restores RAR[0].  Per the header above, TX/RX buffers are also
 * torn down.  Runs with the core mutex held (asserted below).
 */
1531 struct adapter *adapter = arg;
1532 struct ixgbe_hw *hw = &adapter->hw;
1535 mtx_assert(&adapter->core_mtx, MA_OWNED);
1537 INIT_DEBUGOUT("ixv_stop: begin\n");
1538 ixv_disable_intr(adapter);
1540 /* Tell the stack that the interface is no longer active */
1541 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/* Clear the stopped flag first so the shared-code stop really runs. */
1544 adapter->hw.adapter_stopped = FALSE;
1545 ixgbe_stop_adapter(hw);
1546 callout_stop(&adapter->timer);
1548 /* reprogram the RAR[0] in case user changed it. */
1549 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1555 /*********************************************************************
1557 * Determine hardware revision.
1559 **********************************************************************/
/*
 * Read PCI config space to identify the board, and force Bus Master /
 * Memory Space enable if the hypervisor left them clear (workaround
 * for VMs under KVM, per the comment below).
 */
1561 ixv_identify_hardware(struct adapter *adapter)
1563 device_t dev = adapter->dev;
1567 ** Make sure BUSMASTER is set, on a VM under
1568 ** KVM it may not be and will break things.
1570 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1571 if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1572 (pci_cmd_word & PCIM_CMD_MEMEN))) {
1573 INIT_DEBUGOUT("Memory Access and/or Bus Master "
1574 "bits were not set!\n");
1575 pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1576 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1579 /* Save off the information about this board */
1580 adapter->hw.vendor_id = pci_get_vendor(dev);
1581 adapter->hw.device_id = pci_get_device(dev);
1582 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1583 adapter->hw.subsystem_vendor_id =
1584 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1585 adapter->hw.subsystem_device_id =
1586 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1591 /*********************************************************************
1593 * Setup MSIX Interrupt resources and handlers
1595 **********************************************************************/
/*
 * Allocate and wire up the MSI-X vectors: one IRQ + fast taskqueue
 * per RX/TX queue, plus one final vector for the mailbox (link)
 * interrupt.  Also applies a QEMU MSI-X enable workaround for the
 * 82599 VF (see the comment block near the end).
 * NOTE(review): fragmentary excerpt -- error returns, rid
 * assignments, and some braces are not visible.
 */
1597 ixv_allocate_msix(struct adapter *adapter)
1599 device_t dev = adapter->dev;
1600 struct ix_queue *que = adapter->queues;
1601 int error, rid, vector = 0;
1603 for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
1605 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1606 RF_SHAREABLE | RF_ACTIVE);
1607 if (que->res == NULL) {
1608 device_printf(dev,"Unable to allocate"
1609 " bus resource: que interrupt [%d]\n", vector);
1612 /* Set the handler function */
1613 error = bus_setup_intr(dev, que->res,
1614 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1615 ixv_msix_que, que, &que->tag);
1618 device_printf(dev, "Failed to register QUE handler");
/* bus_describe_intr only exists on 8.0.504 and newer. */
1621 #if __FreeBSD_version >= 800504
1622 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1625 adapter->que_mask |= (u64)(1 << que->msix);
1627 ** Bind the msix vector, and thus the
1628 ** ring to the corresponding cpu.
1630 if (adapter->num_queues > 1)
1631 bus_bind_intr(dev, que->res, i);
/* Deferred-work taskqueue for this queue's handler. */
1633 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1634 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1635 taskqueue_thread_enqueue, &que->tq);
1636 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1637 device_get_nameunit(adapter->dev));
/* Now the mailbox (link) vector. */
1642 adapter->res = bus_alloc_resource_any(dev,
1643 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1644 if (!adapter->res) {
1645 device_printf(dev,"Unable to allocate"
1646 " bus resource: MBX interrupt [%d]\n", rid);
1649 /* Set the mbx handler function */
1650 error = bus_setup_intr(dev, adapter->res,
1651 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1652 ixv_msix_mbx, adapter, &adapter->tag);
1654 adapter->res = NULL;
1655 device_printf(dev, "Failed to register LINK handler");
1658 #if __FreeBSD_version >= 800504
1659 bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1661 adapter->mbxvec = vector;
1662 /* Tasklets for Mailbox */
1663 TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
1664 adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1665 taskqueue_thread_enqueue, &adapter->tq);
1666 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1667 device_get_nameunit(adapter->dev));
1669 ** Due to a broken design QEMU will fail to properly
1670 ** enable the guest for MSIX unless the vectors in
1671 ** the table are all set up, so we must rewrite the
1672 ** ENABLE in the MSIX control register again at this
1673 ** point to cause it to successfully initialize us.
1675 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1677 pci_find_cap(dev, PCIY_MSIX, &rid);
1678 rid += PCIR_MSIX_CTRL;
1679 msix_ctrl = pci_read_config(dev, rid, 2);
1680 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1681 pci_write_config(dev, rid, msix_ctrl, 2);
1688 * Setup MSIX resources, note that the VF
1689 * device MUST use MSIX, there is no fallback.
/*
 * Probe and allocate MSI-X vectors.  The VF has no legacy/MSI
 * fallback (per the header above): it needs two vectors -- one
 * queue vector plus one mailbox vector.
 * NOTE(review): fragmentary excerpt -- the return statements and
 * the vectors < want check are not visible.
 */
1692 ixv_setup_msix(struct adapter *adapter)
1694 device_t dev = adapter->dev;
1695 int rid, vectors, want = 2;
1698 /* First try MSI/X */
1700 adapter->msix_mem = bus_alloc_resource_any(dev,
1701 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1702 if (!adapter->msix_mem) {
1703 device_printf(adapter->dev,
1704 "Unable to map MSIX table \n");
1708 vectors = pci_msix_count(dev);
/* Failure path: release the MSI-X table BAR before bailing out. */
1710 bus_release_resource(dev, SYS_RES_MEMORY,
1711 rid, adapter->msix_mem);
1712 adapter->msix_mem = NULL;
1717 ** Want two vectors: one for a queue,
1718 ** plus an additional for mailbox.
1720 if (pci_alloc_msix(dev, &want) == 0) {
1721 device_printf(adapter->dev,
1722 "Using MSIX interrupts with %d vectors\n", want);
1726 device_printf(adapter->dev,"MSIX config error\n");
/*
 * Map the device's memory BAR, record the bus-space tag/handle used
 * by the register-access macros, and then configure MSI-X.  The VF
 * always uses a single queue (num_queues = 1).
 * NOTE(review): fragmentary excerpt -- the rid declaration for the
 * BAR and the return statements are not visible.
 */
1732 ixv_allocate_pci_resources(struct adapter *adapter)
1735 device_t dev = adapter->dev;
1738 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1741 if (!(adapter->pci_mem)) {
1742 device_printf(dev,"Unable to allocate bus resource: memory\n");
/* Stash the bus-space cookies where the osdep register macros look. */
1746 adapter->osdep.mem_bus_space_tag =
1747 rman_get_bustag(adapter->pci_mem);
1748 adapter->osdep.mem_bus_space_handle =
1749 rman_get_bushandle(adapter->pci_mem);
1750 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
1752 adapter->num_queues = 1;
1753 adapter->hw.back = &adapter->osdep;
1756 ** Now setup MSI/X, should
1757 ** return us the number of
1758 ** configured vectors.
1760 adapter->msix = ixv_setup_msix(adapter);
1761 if (adapter->msix == ENXIO)
1768 ixv_free_pci_resources(struct adapter * adapter)
1770 struct ix_queue *que = adapter->queues;
1771 device_t dev = adapter->dev;
1774 memrid = PCIR_BAR(MSIX_BAR);
1777 ** There is a slight possibility of a failure mode
1778 ** in attach that will result in entering this function
1779 ** before interrupt resources have been initialized, and
1780 ** in that case we do not want to execute the loops below
1781 ** We can detect this reliably by the state of the adapter
1784 if (adapter->res == NULL)
1788 ** Release all msix queue resources:
1790 for (int i = 0; i < adapter->num_queues; i++, que++) {
1791 rid = que->msix + 1;
1792 if (que->tag != NULL) {
1793 bus_teardown_intr(dev, que->res, que->tag);
1796 if (que->res != NULL)
1797 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1801 /* Clean the Legacy or Link interrupt last */
1802 if (adapter->mbxvec) /* we are doing MSIX */
1803 rid = adapter->mbxvec + 1;
1805 (adapter->msix != 0) ? (rid = 1):(rid = 0);
1807 if (adapter->tag != NULL) {
1808 bus_teardown_intr(dev, adapter->res, adapter->tag);
1809 adapter->tag = NULL;
1811 if (adapter->res != NULL)
1812 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
1816 pci_release_msi(dev);
1818 if (adapter->msix_mem != NULL)
1819 bus_release_resource(dev, SYS_RES_MEMORY,
1820 memrid, adapter->msix_mem);
1822 if (adapter->pci_mem != NULL)
1823 bus_release_resource(dev, SYS_RES_MEMORY,
1824 PCIR_BAR(0), adapter->pci_mem);
1829 /*********************************************************************
1831 * Setup networking device structure and register an interface.
1833 **********************************************************************/
/*
 * Allocate and initialize the ifnet: entry points (init/ioctl/
 * transmit), capabilities (checksum offload, TSO4, VLAN, jumbo,
 * LRO), and the ifmedia callbacks.  Finishes by attaching the
 * Ethernet layer with the hardware MAC address.
 */
1835 ixv_setup_interface(device_t dev, struct adapter *adapter)
1839 INIT_DEBUGOUT("ixv_setup_interface: begin");
1841 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1843 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1844 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1845 ifp->if_baudrate = 1000000000;
1846 ifp->if_init = ixv_init;
1847 ifp->if_softc = adapter;
1848 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1849 ifp->if_ioctl = ixv_ioctl;
/* 8.0+ has the multiqueue if_transmit path; older uses if_start. */
1850 #if __FreeBSD_version >= 800000
1851 ifp->if_transmit = ixv_mq_start;
1852 ifp->if_qflush = ixv_qflush;
1854 ifp->if_start = ixv_start;
1856 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1858 ether_ifattach(ifp, adapter->hw.mac.addr);
1860 adapter->max_frame_size =
1861 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1864 * Tell the upper layer(s) we support long frames.
1866 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1868 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
1869 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1870 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1873 ifp->if_capenable = ifp->if_capabilities;
/* LRO is advertised but left out of capenable above. */
1875 /* Don't enable LRO by default */
1876 ifp->if_capabilities |= IFCAP_LRO;
1879 * Specify the media types supported by this adapter and register
1880 * callbacks to update media and link information
1882 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1884 ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
1885 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1886 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * Query current link state via the shared-code check_link hook, then
 * (re)run setup_link with autonegotiation enabled.  Both hooks are
 * optional function pointers and are guarded accordingly.
 */
1892 ixv_config_link(struct adapter *adapter)
1894 struct ixgbe_hw *hw = &adapter->hw;
1895 u32 autoneg, err = 0;
1896 bool negotiate = TRUE;
1898 if (hw->mac.ops.check_link)
1899 err = hw->mac.ops.check_link(hw, &autoneg,
1900 &adapter->link_up, FALSE);
1904 if (hw->mac.ops.setup_link)
1905 err = hw->mac.ops.setup_link(hw, autoneg,
1906 negotiate, adapter->link_up);
1911 /********************************************************************
1912 * Manage DMA'able memory.
1913 *******************************************************************/
/*
 * bus_dmamap_load() callback: stores the first (and only) segment's
 * bus address into the bus_addr_t pointed to by arg.
 */
1915 ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1919 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * Allocate a DMA-able region of `size` bytes: creates a dedicated
 * tag (DBA_ALIGN-aligned), allocates the memory, and loads the map
 * so dma->dma_paddr is filled in via ixv_dmamap_cb.  On failure the
 * partially-built state is torn down (see the unwind lines at the
 * bottom).  Paired with ixv_dma_free().
 */
1924 ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
1925 struct ixv_dma_alloc *dma, int mapflags)
1927 device_t dev = adapter->dev;
1930 r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
1931 DBA_ALIGN, 0, /* alignment, bounds */
1932 BUS_SPACE_MAXADDR, /* lowaddr */
1933 BUS_SPACE_MAXADDR, /* highaddr */
1934 NULL, NULL, /* filter, filterarg */
1937 size, /* maxsegsize */
1938 BUS_DMA_ALLOCNOW, /* flags */
1939 NULL, /* lockfunc */
1940 NULL, /* lockfuncarg */
1943 device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
1947 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1948 BUS_DMA_NOWAIT, &dma->dma_map);
1950 device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
1954 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1958 mapflags | BUS_DMA_NOWAIT);
1960 device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
1964 dma->dma_size = size;
/* Error-unwind path: free memory then destroy the tag. */
1967 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1969 bus_dma_tag_destroy(dma->dma_tag);
1971 dma->dma_map = NULL;
1972 dma->dma_tag = NULL;
/*
 * Release a region created by ixv_dma_malloc(): sync for CPU access,
 * unload the map, free the memory, and destroy the tag.
 */
1977 ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
1979 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
1980 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1981 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1982 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1983 bus_dma_tag_destroy(dma->dma_tag);
1987 /*********************************************************************
1989 * Allocate memory for the transmit and receive rings, and then
1990 * the descriptors associated with each, called only once at attach.
1992 **********************************************************************/
/*
 * One-time (attach) allocation of all queue state: the ix_queue
 * array, the TX and RX ring arrays, each ring's descriptor DMA
 * memory, its lock, its buffer structures, and (8.0+) a buf_ring.
 * txconf/rxconf count completed iterations so the error-unwind
 * loops at the bottom can free exactly what was allocated.
 * NOTE(review): fragmentary excerpt -- goto labels, error
 * assignments, and several braces are not visible.
 */
1994 ixv_allocate_queues(struct adapter *adapter)
1996 device_t dev = adapter->dev;
1997 struct ix_queue *que;
1998 struct tx_ring *txr;
1999 struct rx_ring *rxr;
2000 int rsize, tsize, error = 0;
2001 int txconf = 0, rxconf = 0;
2003 /* First allocate the top level queue structs */
2004 if (!(adapter->queues =
2005 (struct ix_queue *) malloc(sizeof(struct ix_queue) *
2006 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2007 device_printf(dev, "Unable to allocate queue memory\n");
2012 /* First allocate the TX ring struct memory */
2013 if (!(adapter->tx_rings =
2014 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2015 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2016 device_printf(dev, "Unable to allocate TX ring memory\n");
2021 /* Next allocate the RX */
2022 if (!(adapter->rx_rings =
2023 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2024 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2025 device_printf(dev, "Unable to allocate RX ring memory\n");
2030 /* For the ring itself */
2031 tsize = roundup2(adapter->num_tx_desc *
2032 sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2035 * Now set up the TX queues, txconf is needed to handle the
2036 * possibility that things fail midcourse and we need to
2037 * undo memory gracefully
2039 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2040 /* Set up some basics */
2041 txr = &adapter->tx_rings[i];
2042 txr->adapter = adapter;
2045 /* Initialize the TX side lock */
2046 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2047 device_get_nameunit(dev), txr->me);
2048 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
/* Descriptor ring DMA memory, zeroed before use. */
2050 if (ixv_dma_malloc(adapter, tsize,
2051 &txr->txdma, BUS_DMA_NOWAIT)) {
2053 "Unable to allocate TX Descriptor memory\n");
2057 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2058 bzero((void *)txr->tx_base, tsize);
2060 /* Now allocate transmit buffers for the ring */
2061 if (ixv_allocate_transmit_buffers(txr)) {
2063 "Critical Failure setting up transmit buffers\n");
2067 #if __FreeBSD_version >= 800000
2068 /* Allocate a buf ring */
2069 txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
2070 M_WAITOK, &txr->tx_mtx);
2071 if (txr->br == NULL) {
2073 "Critical Failure setting up buf ring\n");
2081 * Next the RX queues...
2083 rsize = roundup2(adapter->num_rx_desc *
2084 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2085 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2086 rxr = &adapter->rx_rings[i];
2087 /* Set up some basics */
2088 rxr->adapter = adapter;
2091 /* Initialize the RX side lock */
2092 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2093 device_get_nameunit(dev), rxr->me);
2094 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2096 if (ixv_dma_malloc(adapter, rsize,
2097 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2099 "Unable to allocate RxDescriptor memory\n");
2103 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2104 bzero((void *)rxr->rx_base, rsize);
2106 /* Allocate receive buffers for the ring*/
2107 if (ixv_allocate_receive_buffers(rxr)) {
2109 "Critical Failure setting up receive buffers\n");
2116 ** Finally set up the queue holding structs
2118 for (int i = 0; i < adapter->num_queues; i++) {
2119 que = &adapter->queues[i];
2120 que->adapter = adapter;
2121 que->txr = &adapter->tx_rings[i];
2122 que->rxr = &adapter->rx_rings[i];
/* Error-unwind: free only the rings that completed (rxconf/txconf). */
2128 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2129 ixv_dma_free(adapter, &rxr->rxdma);
2131 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2132 ixv_dma_free(adapter, &txr->txdma);
2133 free(adapter->rx_rings, M_DEVBUF);
2135 free(adapter->tx_rings, M_DEVBUF);
2137 free(adapter->queues, M_DEVBUF);
2143 /*********************************************************************
2145 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2146 * the information needed to transmit a packet on the wire. This is
2147 * called only once at attach, setup is done every reset.
2149 **********************************************************************/
/*
 * Attach-time allocation for one TX ring: create the packet DMA tag
 * (up to IXV_TSO_SIZE per packet), allocate the tx_buffer array, and
 * create one DMA map per descriptor.  On failure, everything is
 * released via ixv_free_transmit_structures().
 */
2151 ixv_allocate_transmit_buffers(struct tx_ring *txr)
2153 struct adapter *adapter = txr->adapter;
2154 device_t dev = adapter->dev;
2155 struct ixv_tx_buf *txbuf;
2159 * Setup DMA descriptor areas.
2161 if ((error = bus_dma_tag_create(
2162 bus_get_dma_tag(adapter->dev), /* parent */
2163 1, 0, /* alignment, bounds */
2164 BUS_SPACE_MAXADDR, /* lowaddr */
2165 BUS_SPACE_MAXADDR, /* highaddr */
2166 NULL, NULL, /* filter, filterarg */
2167 IXV_TSO_SIZE, /* maxsize */
2169 PAGE_SIZE, /* maxsegsize */
2171 NULL, /* lockfunc */
2172 NULL, /* lockfuncarg */
2174 device_printf(dev,"Unable to allocate TX DMA tag\n");
2178 if (!(txr->tx_buffers =
2179 (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
2180 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2181 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2186 /* Create the descriptor buffer dma maps */
2187 txbuf = txr->tx_buffers;
2188 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2189 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2191 device_printf(dev, "Unable to create TX DMA map\n");
2198 /* We free all, it handles case where we are in the middle */
2199 ixv_free_transmit_structures(adapter);
2203 /*********************************************************************
2205 * Initialize a transmit ring.
2207 **********************************************************************/
/*
 * (Re)initialize one TX ring for use: zero the descriptor area,
 * reset the head/tail indices, release any mbufs left from a prior
 * run, and mark every descriptor available.  Called on every reset.
 */
2209 ixv_setup_transmit_ring(struct tx_ring *txr)
2211 struct adapter *adapter = txr->adapter;
2212 struct ixv_tx_buf *txbuf;
2215 /* Clear the old ring contents */
2217 bzero((void *)txr->tx_base,
2218 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc;
2220 txr->next_avail_desc = 0;
2221 txr->next_to_clean = 0;
2223 /* Free any existing tx buffers. */
2224 txbuf = txr->tx_buffers;
2225 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2226 if (txbuf->m_head != NULL) {
2227 bus_dmamap_sync(txr->txtag, txbuf->map,
2228 BUS_DMASYNC_POSTWRITE);
2229 bus_dmamap_unload(txr->txtag, txbuf->map);
2230 m_freem(txbuf->m_head);
2231 txbuf->m_head = NULL;
2233 /* Clear the EOP index */
2234 txbuf->eop_index = -1;
2237 /* Set number of descriptors available */
2238 txr->tx_avail = adapter->num_tx_desc;
/* Push the zeroed ring out to the device before it is enabled. */
2240 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2241 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2245 /*********************************************************************
2247 * Initialize all transmit rings.
2249 **********************************************************************/
/*
 * Initialize every TX ring by calling ixv_setup_transmit_ring()
 * on each in turn.
 */
2251 ixv_setup_transmit_structures(struct adapter *adapter)
2253 struct tx_ring *txr = adapter->tx_rings;
2255 for (int i = 0; i < adapter->num_queues; i++, txr++)
2256 ixv_setup_transmit_ring(txr);
2261 /*********************************************************************
2263 * Enable transmit unit.
2265 **********************************************************************/
/*
 * Program the hardware TX registers for each ring: write-back
 * threshold, descriptor base address/length, head/tail reset,
 * enable bit, and relaxed-ordering disable for write-back.
 */
2267 ixv_initialize_transmit_units(struct adapter *adapter)
2269 struct tx_ring *txr = adapter->tx_rings;
2270 struct ixgbe_hw *hw = &adapter->hw;
2273 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2274 u64 tdba = txr->txdma.dma_paddr;
2277 /* Set WTHRESH to 8, burst writeback */
2278 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2279 txdctl |= (8 << 16);
2280 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
/* Re-read then set the enable bit in a second write. */
2282 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2283 txdctl |= IXGBE_TXDCTL_ENABLE;
2284 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2286 /* Set the HW Tx Head and Tail indices */
2287 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
2288 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
2290 /* Setup Transmit Descriptor Cmd Settings */
2291 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2292 txr->watchdog_check = FALSE;
2294 /* Set Ring parameters */
2295 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
2296 (tdba & 0x00000000ffffffffULL));
2297 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
2298 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
2299 adapter->num_tx_desc *
2300 sizeof(struct ixgbe_legacy_tx_desc));
/* Disable relaxed ordering for descriptor write-back. */
2301 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
2302 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2303 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
2310 /*********************************************************************
2312 * Free all transmit rings.
2314 **********************************************************************/
/*
 * Tear down all TX rings: free each ring's buffers and descriptor
 * DMA memory, destroy its lock, then free the ring array itself.
 */
2316 ixv_free_transmit_structures(struct adapter *adapter)
2318 struct tx_ring *txr = adapter->tx_rings;
2320 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2322 ixv_free_transmit_buffers(txr);
2323 ixv_dma_free(adapter, &txr->txdma);
2325 IXV_TX_LOCK_DESTROY(txr);
2327 free(adapter->tx_rings, M_DEVBUF);
2330 /*********************************************************************
2332 * Free transmit ring related data structures.
2334 **********************************************************************/
/*
 * Release one ring's per-packet state: free any in-flight mbufs,
 * unload/destroy every DMA map, free the buf_ring (8.0+), the
 * tx_buffer array, and the packet DMA tag.  Safe to call on a
 * partially-initialized ring (NULL checks throughout).
 */
2336 ixv_free_transmit_buffers(struct tx_ring *txr)
2338 struct adapter *adapter = txr->adapter;
2339 struct ixv_tx_buf *tx_buffer;
2342 INIT_DEBUGOUT("free_transmit_ring: begin");
2344 if (txr->tx_buffers == NULL)
2347 tx_buffer = txr->tx_buffers;
2348 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
/* Entries with an mbuf: sync + unload before freeing the chain. */
2349 if (tx_buffer->m_head != NULL) {
2350 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2351 BUS_DMASYNC_POSTWRITE);
2352 bus_dmamap_unload(txr->txtag,
2354 m_freem(tx_buffer->m_head);
2355 tx_buffer->m_head = NULL;
2356 if (tx_buffer->map != NULL) {
2357 bus_dmamap_destroy(txr->txtag,
2359 tx_buffer->map = NULL;
/* Entries with only a map (no mbuf) still need unload + destroy. */
2361 } else if (tx_buffer->map != NULL) {
2362 bus_dmamap_unload(txr->txtag,
2364 bus_dmamap_destroy(txr->txtag,
2366 tx_buffer->map = NULL;
2369 #if __FreeBSD_version >= 800000
2370 if (txr->br != NULL)
2371 buf_ring_free(txr->br, M_DEVBUF);
2373 if (txr->tx_buffers != NULL) {
2374 free(txr->tx_buffers, M_DEVBUF);
2375 txr->tx_buffers = NULL;
2377 if (txr->txtag != NULL) {
2378 bus_dma_tag_destroy(txr->txtag);
2384 /*********************************************************************
2386 * Advanced Context Descriptor setup for VLAN or CSUM
2388 **********************************************************************/
/*
 * Build an advanced context descriptor for VLAN tagging and/or L3/L4
 * checksum offload for the given frame, consuming one descriptor at
 * next_avail_desc.  Parses the Ethernet (and optional VLAN) header
 * to find the L3 type, then IPv4/IPv6 to find the L4 protocol.
 * NOTE(review): fragmentary excerpt -- returns, break statements,
 * and the TCP/UDP/SCTP switch structure are not fully visible.
 */
2391 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2393 struct adapter *adapter = txr->adapter;
2394 struct ixgbe_adv_tx_context_desc *TXD;
2395 struct ixv_tx_buf *tx_buffer;
2396 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2397 struct ether_vlan_header *eh;
2399 struct ip6_hdr *ip6;
2400 int ehdrlen, ip_hlen = 0;
2403 bool offload = TRUE;
2404 int ctxd = txr->next_avail_desc;
/* No offload flags requested -> checksum context not needed. */
2408 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
2412 tx_buffer = &txr->tx_buffers[ctxd];
2413 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2416 ** In advanced descriptors the vlan tag must
2417 ** be placed into the descriptor itself.
2419 if (mp->m_flags & M_VLANTAG) {
2420 vtag = htole16(mp->m_pkthdr.ether_vtag);
2421 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2422 } else if (offload == FALSE)
2426 * Determine where frame payload starts.
2427 * Jump over vlan headers if already present,
2428 * helpful for QinQ too.
2430 eh = mtod(mp, struct ether_vlan_header *);
2431 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2432 etype = ntohs(eh->evl_proto);
2433 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2435 etype = ntohs(eh->evl_encap_proto);
2436 ehdrlen = ETHER_HDR_LEN;
2439 /* Set the ether header length */
2440 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
/* L3 dispatch on ethertype (IPv4 case; IPv6 below). */
2444 ip = (struct ip *)(mp->m_data + ehdrlen);
2445 ip_hlen = ip->ip_hl << 2;
2446 if (mp->m_len < ehdrlen + ip_hlen)
2449 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2451 case ETHERTYPE_IPV6:
2452 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2453 ip_hlen = sizeof(struct ip6_hdr);
2454 if (mp->m_len < ehdrlen + ip_hlen)
2456 ipproto = ip6->ip6_nxt;
2457 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2464 vlan_macip_lens |= ip_hlen;
2465 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
/* L4 checksum selection: TCP / UDP / (8.0+) SCTP. */
2469 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
2470 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2474 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
2475 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2478 #if __FreeBSD_version >= 800000
2480 if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
2481 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2489 /* Now copy bits into descriptor */
2490 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2491 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2492 TXD->seqnum_seed = htole32(0);
2493 TXD->mss_l4len_idx = htole32(0);
2495 tx_buffer->m_head = NULL;
2496 tx_buffer->eop_index = -1;
2498 /* We've consumed the first desc, adjust counters */
2499 if (++ctxd == adapter->num_tx_desc)
2501 txr->next_avail_desc = ctxd;
2507 /**********************************************************************
2509 * Setup work for hardware segmentation offload (TSO) on
2510 * adapters using advanced tx descriptors
2512 **********************************************************************/
/*
 * Build the context descriptor for TCP segmentation offload (TSO,
 * IPv4/TCP only): computes header lengths, seeds the TCP pseudo-
 * header checksum, fills VLAN/MACLEN/IPLEN and MSS/L4LEN fields,
 * and stores the TCP payload length in *paylen for the caller
 * (ixv_xmit) to place in the data descriptors.  Returns FALSE for
 * non-TCP frames.  Consumes one descriptor.
 * NOTE(review): fragmentary excerpt -- return TRUE and wrap-around
 * reset of ctxd are not visible.
 */
2514 ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2516 struct adapter *adapter = txr->adapter;
2517 struct ixgbe_adv_tx_context_desc *TXD;
2518 struct ixv_tx_buf *tx_buffer;
2519 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2520 u32 mss_l4len_idx = 0;
2522 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
2523 struct ether_vlan_header *eh;
2529 * Determine where frame payload starts.
2530 * Jump over vlan headers if already present
2532 eh = mtod(mp, struct ether_vlan_header *);
2533 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2534 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2536 ehdrlen = ETHER_HDR_LEN;
2538 /* Ensure we have at least the IP+TCP header in the first mbuf. */
2539 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2542 ctxd = txr->next_avail_desc;
2543 tx_buffer = &txr->tx_buffers[ctxd];
2544 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2546 ip = (struct ip *)(mp->m_data + ehdrlen);
2547 if (ip->ip_p != IPPROTO_TCP)
2548 return FALSE; /* 0 */
2550 ip_hlen = ip->ip_hl << 2;
2551 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
/* Pre-seed the pseudo-header checksum; hardware finishes per segment. */
2552 th->th_sum = in_pseudo(ip->ip_src.s_addr,
2553 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2554 tcp_hlen = th->th_off << 2;
2555 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2557 /* This is used in the transmit desc in encap */
2558 *paylen = mp->m_pkthdr.len - hdrlen;
2560 /* VLAN MACLEN IPLEN */
2561 if (mp->m_flags & M_VLANTAG) {
2562 vtag = htole16(mp->m_pkthdr.ether_vtag);
2563 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2566 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2567 vlan_macip_lens |= ip_hlen;
2568 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2570 /* ADV DTYPE TUCMD */
2571 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2572 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2573 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2574 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
/* MSS and L4 header length for the segmentation engine. */
2578 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2579 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2580 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2582 TXD->seqnum_seed = htole32(0);
2583 tx_buffer->m_head = NULL;
2584 tx_buffer->eop_index = -1;
2586 if (++ctxd == adapter->num_tx_desc)
2590 txr->next_avail_desc = ctxd;
2595 /**********************************************************************
2597 * Examine each tx_buffer in the used queue. If the hardware is done
2598 * processing the packet then free associated resources. The
2599 * tx_buffer is put back on the free queue.
2601 **********************************************************************/
/*
 * Transmit completion cleanup for one ring. Must be called with the TX
 * ring mutex held (asserted below). Walks completed packets (those whose
 * EOP descriptor has the DD status bit set), unloads and frees their
 * mbufs/DMA maps, zeroes the descriptors, and updates next_to_clean.
 * Clears IFF_DRV_OACTIVE once enough descriptors are free again.
 * NOTE(review): extraction gaps here (e.g. 2643->2645, 2661->2664) hide
 * the inner-loop brace structure and the 'done' initialization.
 */
2603 ixv_txeof(struct tx_ring *txr)
2605 struct adapter *adapter = txr->adapter;
2606 struct ifnet *ifp = adapter->ifp;
2607 u32 first, last, done;
2608 struct ixv_tx_buf *tx_buffer;
2609 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2611 mtx_assert(&txr->tx_mtx, MA_OWNED);
/* Ring entirely free: nothing to clean. */
2613 if (txr->tx_avail == adapter->num_tx_desc)
2616 first = txr->next_to_clean;
2617 tx_buffer = &txr->tx_buffers[first];
2618 /* For cleanup we just use legacy struct */
2619 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2620 last = tx_buffer->eop_index;
2623 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2626 ** Get the index of the first descriptor
2627 ** BEYOND the EOP and call that 'done'.
2628 ** I do this so the comparison in the
2629 ** inner while loop below can be simple
2631 if (++last == adapter->num_tx_desc) last = 0;
2634 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2635 BUS_DMASYNC_POSTREAD);
2637 ** Only the EOP descriptor of a packet now has the DD
2638 ** bit set, this is what we look for...
2640 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2641 /* We clean the range of the packet */
2642 while (first != done) {
2643 tx_desc->upper.data = 0;
2644 tx_desc->lower.data = 0;
2645 tx_desc->buffer_addr = 0;
2648 if (tx_buffer->m_head) {
2649 bus_dmamap_sync(txr->txtag,
2651 BUS_DMASYNC_POSTWRITE);
2652 bus_dmamap_unload(txr->txtag,
2654 m_freem(tx_buffer->m_head);
2655 tx_buffer->m_head = NULL;
2656 tx_buffer->map = NULL;
2658 tx_buffer->eop_index = -1;
/* Progress made: refresh the watchdog timestamp. */
2659 txr->watchdog_time = ticks;
2661 if (++first == adapter->num_tx_desc)
2664 tx_buffer = &txr->tx_buffers[first];
2666 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2669 /* See if there is more work now */
2670 last = tx_buffer->eop_index;
2673 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2674 /* Get next done point */
2675 if (++last == adapter->num_tx_desc) last = 0;
2680 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2681 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2683 txr->next_to_clean = first;
2686 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
2687 * it is OK to send packets. If there are no pending descriptors,
2688 * clear the timeout. Otherwise, if some descriptors have been freed,
2689 * restart the timeout.
2691 if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
2692 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2693 if (txr->tx_avail == adapter->num_tx_desc) {
2694 txr->watchdog_check = FALSE;
2702 /*********************************************************************
2704 * Refresh mbuf buffers for RX descriptor rings
2705 * - now keeps its own state so discards due to resource
2706 * exhaustion are unnecessary, if an mbuf cannot be obtained
2707 * it just returns, keeping its placeholder, thus it can simply
2708 * be recalled to try again.
2710 **********************************************************************/
/*
 * Replenishes RX descriptors from next_to_refresh up to (but not
 * including) 'limit'. Allocates a header mbuf (only in header-split
 * mode) and a jumbo-cluster payload mbuf per slot, DMA-maps them, and
 * writes the new bus addresses into the descriptor. On success the
 * VFRDT tail register is bumped to hand the slots back to hardware.
 * NOTE(review): allocation-failure 'goto update'/early-exit lines appear
 * to have been dropped by the extraction (gaps after 2729 and 2754).
 */
2712 ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
2714 struct adapter *adapter = rxr->adapter;
2715 bus_dma_segment_t hseg[1];
2716 bus_dma_segment_t pseg[1];
2717 struct ixv_rx_buf *rxbuf;
2718 struct mbuf *mh, *mp;
2719 int i, j, nsegs, error;
2720 bool refreshed = FALSE;
2722 i = j = rxr->next_to_refresh;
2723 /* Get the control variable, one beyond refresh point */
2724 if (++j == adapter->num_rx_desc)
2726 while (j != limit) {
2727 rxbuf = &rxr->rx_buffers[i];
/* Header buffer is only populated when header split is active. */
2728 if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
2729 mh = m_gethdr(M_DONTWAIT, MT_DATA);
2732 mh->m_pkthdr.len = mh->m_len = MHLEN;
2734 mh->m_flags |= M_PKTHDR;
2735 m_adj(mh, ETHER_ALIGN);
2736 /* Get the memory mapping */
2737 error = bus_dmamap_load_mbuf_sg(rxr->htag,
2738 rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
2740 printf("GET BUF: dmamap load"
2741 " failure - %d\n", error);
2746 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2747 BUS_DMASYNC_PREREAD);
2748 rxr->rx_base[i].read.hdr_addr =
2749 htole64(hseg[0].ds_addr);
/* Payload cluster, sized by the adapter's configured mbuf size. */
2752 if (rxbuf->m_pack == NULL) {
2753 mp = m_getjcl(M_DONTWAIT, MT_DATA,
2754 M_PKTHDR, adapter->rx_mbuf_sz);
2760 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2761 /* Get the memory mapping */
2762 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2763 rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
2765 printf("GET BUF: dmamap load"
2766 " failure - %d\n", error);
2768 rxbuf->m_pack = NULL;
2772 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2773 BUS_DMASYNC_PREREAD);
2774 rxr->rx_base[i].read.pkt_addr =
2775 htole64(pseg[0].ds_addr);
/* Commit progress so a later retry resumes exactly here. */
2778 rxr->next_to_refresh = i = j;
2779 /* Calculate next index */
2780 if (++j == adapter->num_rx_desc)
2784 if (refreshed) /* update tail index */
2785 IXGBE_WRITE_REG(&adapter->hw,
2786 IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
2790 /*********************************************************************
2792 * Allocate memory for rx_buffer structures. Since we use one
2793 * rx_buffer per received packet, the maximum number of rx_buffer's
2794 * that we'll need is equal to the number of receive descriptors
2795 * that we've allocated.
2797 **********************************************************************/
/*
 * Allocates the per-ring rx_buffer array, creates two DMA tags (htag for
 * MSIZE header mbufs, ptag for MJUMPAGESIZE payload clusters) and one
 * hmap/pmap DMA map pair per descriptor. On any failure it falls through
 * to ixv_free_receive_structures(), which tolerates partial setup.
 * NOTE(review): nsegs/flags arguments of bus_dma_tag_create and the
 * error-return lines were dropped by the extraction (gaps 2820->2822).
 */
2799 ixv_allocate_receive_buffers(struct rx_ring *rxr)
2801 struct adapter *adapter = rxr->adapter;
2802 device_t dev = adapter->dev;
2803 struct ixv_rx_buf *rxbuf;
2804 int i, bsize, error;
2806 bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
2807 if (!(rxr->rx_buffers =
2808 (struct ixv_rx_buf *) malloc(bsize,
2809 M_DEVBUF, M_NOWAIT | M_ZERO))) {
2810 device_printf(dev, "Unable to allocate rx_buffer memory\n");
/* Header tag: one MSIZE segment per header mbuf. */
2815 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2816 1, 0, /* alignment, bounds */
2817 BUS_SPACE_MAXADDR, /* lowaddr */
2818 BUS_SPACE_MAXADDR, /* highaddr */
2819 NULL, NULL, /* filter, filterarg */
2820 MSIZE, /* maxsize */
2822 MSIZE, /* maxsegsize */
2824 NULL, /* lockfunc */
2825 NULL, /* lockfuncarg */
2827 device_printf(dev, "Unable to create RX DMA tag\n");
/* Payload tag: one MJUMPAGESIZE segment per cluster. */
2831 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2832 1, 0, /* alignment, bounds */
2833 BUS_SPACE_MAXADDR, /* lowaddr */
2834 BUS_SPACE_MAXADDR, /* highaddr */
2835 NULL, NULL, /* filter, filterarg */
2836 MJUMPAGESIZE, /* maxsize */
2838 MJUMPAGESIZE, /* maxsegsize */
2840 NULL, /* lockfunc */
2841 NULL, /* lockfuncarg */
2843 device_printf(dev, "Unable to create RX DMA tag\n");
2847 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
2848 rxbuf = &rxr->rx_buffers[i];
2849 error = bus_dmamap_create(rxr->htag,
2850 BUS_DMA_NOWAIT, &rxbuf->hmap);
2852 device_printf(dev, "Unable to create RX head map\n");
2855 error = bus_dmamap_create(rxr->ptag,
2856 BUS_DMA_NOWAIT, &rxbuf->pmap);
2858 device_printf(dev, "Unable to create RX pkt map\n");
2866 /* Frees all, but can handle partial completion */
2867 ixv_free_receive_structures(adapter);
/*
 * Frees every header and payload mbuf currently attached to a ring's
 * rx_buffer slots, syncing and unloading their DMA maps first, and NULLs
 * the pointers so the slots can be reused. DMA maps and tags themselves
 * are left alone (ixv_free_receive_buffers destroys those).
 */
2872 ixv_free_receive_ring(struct rx_ring *rxr)
2874 struct adapter *adapter;
2875 struct ixv_rx_buf *rxbuf;
2878 adapter = rxr->adapter;
2879 for (i = 0; i < adapter->num_rx_desc; i++) {
2880 rxbuf = &rxr->rx_buffers[i];
2881 if (rxbuf->m_head != NULL) {
2882 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2883 BUS_DMASYNC_POSTREAD);
2884 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
/* Restore M_PKTHDR so m_freem() releases the whole chain cleanly. */
2885 rxbuf->m_head->m_flags |= M_PKTHDR;
2886 m_freem(rxbuf->m_head);
2888 if (rxbuf->m_pack != NULL) {
2889 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2890 BUS_DMASYNC_POSTREAD);
2891 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
2892 rxbuf->m_pack->m_flags |= M_PKTHDR;
2893 m_freem(rxbuf->m_pack);
2895 rxbuf->m_head = NULL;
2896 rxbuf->m_pack = NULL;
2901 /*********************************************************************
2903 * Initialize a receive ring and its buffers.
2905 **********************************************************************/
/*
 * Zeroes the descriptor area, discards any stale mbufs, optionally
 * enables header split (per the ixv_header_split tunable), allocates and
 * maps fresh header/payload mbufs for every descriptor, resets the
 * ring's soft indices/counters, and initializes software LRO when the
 * interface has IFCAP_LRO enabled. On failure it unwinds via
 * ixv_free_receive_ring() and returns the error.
 * NOTE(review): locals 'dev', 'ifp' and several error-path lines are
 * missing from this extraction (gaps 2909->2912, 2948->2952).
 */
2907 ixv_setup_receive_ring(struct rx_ring *rxr)
2909 struct adapter *adapter;
2912 struct ixv_rx_buf *rxbuf;
2913 bus_dma_segment_t pseg[1], hseg[1];
2914 struct lro_ctrl *lro = &rxr->lro;
2915 int rsize, nsegs, error = 0;
2917 adapter = rxr->adapter;
2921 /* Clear the ring contents */
2923 rsize = roundup2(adapter->num_rx_desc *
2924 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2925 bzero((void *)rxr->rx_base, rsize);
2927 /* Free current RX buffer structs and their mbufs */
2928 ixv_free_receive_ring(rxr);
2930 /* Configure header split? */
2931 if (ixv_header_split)
2932 rxr->hdr_split = TRUE;
2934 /* Now replenish the mbufs */
2935 for (int j = 0; j != adapter->num_rx_desc; ++j) {
2936 struct mbuf *mh, *mp;
2938 rxbuf = &rxr->rx_buffers[j];
2940 ** Dont allocate mbufs if not
2941 ** doing header split, its wasteful
2943 if (rxr->hdr_split == FALSE)
2946 /* First the header */
2947 rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
2948 if (rxbuf->m_head == NULL) {
/* ETHER_ALIGN keeps the IP header 4-byte aligned after the 14-byte MAC header. */
2952 m_adj(rxbuf->m_head, ETHER_ALIGN);
2954 mh->m_len = mh->m_pkthdr.len = MHLEN;
2955 mh->m_flags |= M_PKTHDR;
2956 /* Get the memory mapping */
2957 error = bus_dmamap_load_mbuf_sg(rxr->htag,
2958 rxbuf->hmap, rxbuf->m_head, hseg,
2959 &nsegs, BUS_DMA_NOWAIT);
2960 if (error != 0) /* Nothing elegant to do here */
2962 bus_dmamap_sync(rxr->htag,
2963 rxbuf->hmap, BUS_DMASYNC_PREREAD);
2964 /* Update descriptor */
2965 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
2968 /* Now the payload cluster */
2969 rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
2970 M_PKTHDR, adapter->rx_mbuf_sz);
2971 if (rxbuf->m_pack == NULL) {
2976 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2977 /* Get the memory mapping */
2978 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2979 rxbuf->pmap, mp, pseg,
2980 &nsegs, BUS_DMA_NOWAIT);
2983 bus_dmamap_sync(rxr->ptag,
2984 rxbuf->pmap, BUS_DMASYNC_PREREAD);
2985 /* Update descriptor */
2986 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
2990 /* Setup our descriptor indices */
2991 rxr->next_to_check = 0;
2992 rxr->next_to_refresh = 0;
2993 rxr->lro_enabled = FALSE;
2994 rxr->rx_split_packets = 0;
2996 rxr->discard = FALSE;
2998 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2999 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3002 ** Now set up the LRO interface:
3004 if (ifp->if_capenable & IFCAP_LRO) {
3005 int err = tcp_lro_init(lro);
3007 device_printf(dev, "LRO Initialization failed!\n");
3010 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
3011 rxr->lro_enabled = TRUE;
3012 lro->ifp = adapter->ifp;
/* Error unwind: release any mbufs allocated before the failure. */
3019 ixv_free_receive_ring(rxr);
3024 /*********************************************************************
3026 * Initialize all receive rings.
3028 **********************************************************************/
/*
 * Runs ixv_setup_receive_ring() over every queue; on the first failure
 * it frees the rings that did succeed (ring 'j' cleans up after itself)
 * and propagates the error.
 */
3030 ixv_setup_receive_structures(struct adapter *adapter)
3032 struct rx_ring *rxr = adapter->rx_rings;
3035 for (j = 0; j < adapter->num_queues; j++, rxr++)
3036 if (ixv_setup_receive_ring(rxr))
3042 * Free RX buffers allocated so far, we will only handle
3043 * the rings that completed, the failing case will have
3044 * cleaned up for itself. 'j' failed, so its the terminus.
3046 for (int i = 0; i < j; ++i) {
3047 rxr = &adapter->rx_rings[i];
3048 ixv_free_receive_ring(rxr);
3054 /*********************************************************************
3056 * Setup receive registers and features.
3058 **********************************************************************/
3059 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
/*
 * Programs the VF receive hardware: broadcast/pause filtering (FCTRL),
 * jumbo-frame enable and buffer size (HLREG0), and per-queue RXDCTL
 * enable (polled up to 10 times), descriptor base/length, SRRCTL buffer
 * sizing (header-split or single-buffer layout), and head/tail pointers.
 * Finishes by configuring RXCSUM offload bits.
 * NOTE(review): the per-queue locals after 'u64 rdba' (e.g. rxdctl, reg
 * declarations) and some loop-exit lines are missing from this extraction.
 */
3062 ixv_initialize_receive_units(struct adapter *adapter)
3064 struct rx_ring *rxr = adapter->rx_rings;
3065 struct ixgbe_hw *hw = &adapter->hw;
3066 struct ifnet *ifp = adapter->ifp;
3067 u32 bufsz, fctrl, rxcsum, hlreg;
3070 /* Enable broadcasts */
3071 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3072 fctrl |= IXGBE_FCTRL_BAM;
3073 fctrl |= IXGBE_FCTRL_DPF;
3074 fctrl |= IXGBE_FCTRL_PMCF;
3075 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3077 /* Set for Jumbo Frames? */
3078 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3079 if (ifp->if_mtu > ETHERMTU) {
3080 hlreg |= IXGBE_HLREG0_JUMBOEN;
3081 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3083 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3084 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3086 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3088 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3089 u64 rdba = rxr->rxdma.dma_paddr;
3092 /* Do the queue enabling first */
3093 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3094 rxdctl |= IXGBE_RXDCTL_ENABLE;
3095 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
/* Poll until hardware acknowledges the queue enable (max 10 tries). */
3096 for (int k = 0; k < 10; k++) {
3097 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
3098 IXGBE_RXDCTL_ENABLE)
3105 /* Setup the Base and Length of the Rx Descriptor Ring */
3106 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
3107 (rdba & 0x00000000ffffffffULL));
3108 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
3110 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
3111 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3113 /* Set up the SRRCTL register */
3114 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
3115 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3116 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3118 if (rxr->hdr_split) {
3119 /* Use a standard mbuf for the header */
3120 reg |= ((IXV_RX_HDR <<
3121 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3122 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3123 reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3125 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3126 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
3128 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3129 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
3130 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
3131 adapter->num_rx_desc - 1);
3134 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3136 if (ifp->if_capenable & IFCAP_RXCSUM)
3137 rxcsum |= IXGBE_RXCSUM_PCSD;
/* Without packet-checksum offload, enable IP payload checksum instead. */
3139 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3140 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3142 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3147 /*********************************************************************
3149 * Free all receive rings.
3151 **********************************************************************/
/*
 * Tears down every RX ring: frees its buffers and maps, releases LRO
 * state, frees the DMA'd descriptor area, then the rx_rings array itself.
 * NOTE(review): the tcp_lro_free(lro) call implied by the "Free LRO
 * memory" comment is missing from this extraction.
 */
3153 ixv_free_receive_structures(struct adapter *adapter)
3155 struct rx_ring *rxr = adapter->rx_rings;
3157 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3158 struct lro_ctrl *lro = &rxr->lro;
3159 ixv_free_receive_buffers(rxr);
3160 /* Free LRO memory */
3162 /* Free the ring memory as well */
3163 ixv_dma_free(adapter, &rxr->rxdma);
3166 free(adapter->rx_rings, M_DEVBUF);
3170 /*********************************************************************
3172 * Free receive ring data structures
3174 **********************************************************************/
/*
 * Full per-ring buffer teardown: frees any remaining header/payload
 * mbufs (sync + unload first), destroys each slot's hmap/pmap DMA maps,
 * frees the rx_buffers array, and finally destroys the htag/ptag DMA
 * tags. Safe to call on partially initialized rings — every step is
 * guarded by a NULL check.
 */
3176 ixv_free_receive_buffers(struct rx_ring *rxr)
3178 struct adapter *adapter = rxr->adapter;
3179 struct ixv_rx_buf *rxbuf;
3181 INIT_DEBUGOUT("free_receive_structures: begin");
3183 /* Cleanup any existing buffers */
3184 if (rxr->rx_buffers != NULL) {
3185 for (int i = 0; i < adapter->num_rx_desc; i++) {
3186 rxbuf = &rxr->rx_buffers[i];
3187 if (rxbuf->m_head != NULL) {
3188 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3189 BUS_DMASYNC_POSTREAD);
3190 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3191 rxbuf->m_head->m_flags |= M_PKTHDR;
3192 m_freem(rxbuf->m_head);
3194 if (rxbuf->m_pack != NULL) {
3195 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3196 BUS_DMASYNC_POSTREAD);
3197 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3198 rxbuf->m_pack->m_flags |= M_PKTHDR;
3199 m_freem(rxbuf->m_pack);
3201 rxbuf->m_head = NULL;
3202 rxbuf->m_pack = NULL;
3203 if (rxbuf->hmap != NULL) {
3204 bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
3207 if (rxbuf->pmap != NULL) {
3208 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
3212 if (rxr->rx_buffers != NULL) {
3213 free(rxr->rx_buffers, M_DEVBUF);
3214 rxr->rx_buffers = NULL;
3218 if (rxr->htag != NULL) {
3219 bus_dma_tag_destroy(rxr->htag);
3222 if (rxr->ptag != NULL) {
3223 bus_dma_tag_destroy(rxr->ptag);
/*
 * Hands a completed RX mbuf chain to the stack, first offering it to
 * software LRO when the packet qualifies (IPv4/TCP, hardware-verified
 * checksum, no ETQF match). If LRO declines or is unavailable, the
 * packet goes straight to if_input().
 */
3230 static __inline void
3231 ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
3235 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
3236 * should be computed by hardware. Also it should not have VLAN tag in
3239 if (rxr->lro_enabled &&
3240 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
3241 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3242 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
3243 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
3244 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3245 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
3247 * Send to the stack if:
3248 ** - LRO not enabled, or
3249 ** - no LRO resources, or
3250 ** - lro enqueue fails
3252 if (rxr->lro.lro_cnt != 0)
3253 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
3257 (*ifp->if_input)(ifp, m);
/*
 * Drops the buffers at ring slot 'i': frees any partially assembled
 * chain plus the header/payload mbufs, NULLing the pointers so
 * ixv_refresh_mbufs() will allocate replacements. Used when a frame
 * arrives with error status or while a multi-descriptor discard is in
 * progress.
 */
3261 static __inline void
3262 ixv_rx_discard(struct rx_ring *rxr, int i)
3264 struct ixv_rx_buf *rbuf;
3266 rbuf = &rxr->rx_buffers[i];
3268 if (rbuf->fmp != NULL) {/* Partial chain ? */
3269 rbuf->fmp->m_flags |= M_PKTHDR;
3275 ** With advanced descriptors the writeback
3276 ** clobbers the buffer addrs, so its easier
3277 ** to just free the existing mbufs and take
3278 ** the normal refresh path to get new buffers
3282 m_free(rbuf->m_head);
3283 rbuf->m_head = NULL;
3287 m_free(rbuf->m_pack);
3288 rbuf->m_pack = NULL;
3295 /*********************************************************************
3297 * This routine executes in interrupt context. It replenishes
3298 * the mbufs in the descriptor and sends data which has been
3299 * dma'ed into host memory to upper layer.
3301 * We loop at most count times if count is > 0, or until done if
3304 * Return TRUE for more work, FALSE for all clean.
3305 *********************************************************************/
/*
 * Main RX completion loop for one queue. For each descriptor with DD
 * set it extracts length/ptype/vlan/EOP from the writeback, discards
 * error frames, assembles header-split or single-buffer mbuf chains
 * across descriptors (chain head carried in rbuf->fmp), applies
 * checksum/VLAN/flowid metadata at EOP, and dispatches via
 * ixv_rx_input(). Refreshes ring buffers every 8 processed packets and
 * flushes pending LRO work before returning. Rearms the queue interrupt
 * if descriptors with DD remain.
 * NOTE(review): several declarations (sendmp init, staterr/ptype types)
 * and branch lines are absent here due to lossy extraction — the gaps
 * in the embedded numbering (e.g. 3364->3370, 3400->3406) mark them.
 */
3307 ixv_rxeof(struct ix_queue *que, int count)
3309 struct adapter *adapter = que->adapter;
3310 struct rx_ring *rxr = que->rxr;
3311 struct ifnet *ifp = adapter->ifp;
3312 struct lro_ctrl *lro = &rxr->lro;
3313 struct lro_entry *queued;
3314 int i, nextp, processed = 0;
3316 union ixgbe_adv_rx_desc *cur;
3317 struct ixv_rx_buf *rbuf, *nbuf;
3321 for (i = rxr->next_to_check; count != 0;) {
3322 struct mbuf *sendmp, *mh, *mp;
3324 u16 hlen, plen, hdr, vtag;
3327 /* Sync the ring. */
3328 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3329 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3331 cur = &rxr->rx_base[i];
3332 staterr = le32toh(cur->wb.upper.status_error);
/* DD clear => hardware has not written this slot yet; stop. */
3334 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
3336 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3343 cur->wb.upper.status_error = 0;
3344 rbuf = &rxr->rx_buffers[i];
3348 plen = le16toh(cur->wb.upper.length);
3349 ptype = le32toh(cur->wb.lower.lo_dword.data) &
3350 IXGBE_RXDADV_PKTTYPE_MASK;
3351 hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
3352 vtag = le16toh(cur->wb.upper.vlan);
3353 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
3355 /* Make sure all parts of a bad packet are discarded */
3356 if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
3359 rxr->rx_discarded++;
3361 rxr->discard = TRUE;
3363 rxr->discard = FALSE;
3364 ixv_rx_discard(rxr, i);
/* Peek at the next slot so multi-descriptor chains can be linked. */
3370 if (nextp == adapter->num_rx_desc)
3372 nbuf = &rxr->rx_buffers[nextp];
3376 ** The header mbuf is ONLY used when header
3377 ** split is enabled, otherwise we get normal
3378 ** behavior, ie, both header and payload
3379 ** are DMA'd into the payload buffer.
3381 ** Rather than using the fmp/lmp global pointers
3382 ** we now keep the head of a packet chain in the
3383 ** buffer struct and pass this along from one
3384 ** descriptor to the next, until we get EOP.
3386 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
3387 /* This must be an initial descriptor */
3388 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3389 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3390 if (hlen > IXV_RX_HDR)
3393 mh->m_flags |= M_PKTHDR;
3395 mh->m_pkthdr.len = mh->m_len;
3396 /* Null buf pointer so it is refreshed */
3397 rbuf->m_head = NULL;
3399 ** Check the payload length, this
3400 ** could be zero if its a small
3406 mp->m_flags &= ~M_PKTHDR;
3408 mh->m_pkthdr.len += mp->m_len;
3409 /* Null buf pointer so it is refreshed */
3410 rbuf->m_pack = NULL;
3411 rxr->rx_split_packets++;
3414 ** Now create the forward
3415 ** chain so when complete
3419 /* stash the chain head */
3421 /* Make forward chain */
3423 mp->m_next = nbuf->m_pack;
3425 mh->m_next = nbuf->m_pack;
3427 /* Singlet, prepare to send */
3429 if ((adapter->num_vlans) &&
3430 (staterr & IXGBE_RXD_STAT_VP)) {
3431 sendmp->m_pkthdr.ether_vtag = vtag;
3432 sendmp->m_flags |= M_VLANTAG;
3437 ** Either no header split, or a
3438 ** secondary piece of a fragmented
3443 ** See if there is a stored head
3444 ** that determines what we are
3447 rbuf->m_pack = rbuf->fmp = NULL;
3449 if (sendmp != NULL) /* secondary frag */
3450 sendmp->m_pkthdr.len += mp->m_len;
3452 /* first desc of a non-ps chain */
3454 sendmp->m_flags |= M_PKTHDR;
3455 sendmp->m_pkthdr.len = mp->m_len;
3456 if (staterr & IXGBE_RXD_STAT_VP) {
3457 sendmp->m_pkthdr.ether_vtag = vtag;
3458 sendmp->m_flags |= M_VLANTAG;
3461 /* Pass the head pointer on */
3465 mp->m_next = nbuf->m_pack;
3469 /* Sending this frame? */
3471 sendmp->m_pkthdr.rcvif = ifp;
3474 /* capture data for AIM */
3475 rxr->bytes += sendmp->m_pkthdr.len;
3476 rxr->rx_bytes += sendmp->m_pkthdr.len;
3477 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3478 ixv_rx_checksum(staterr, sendmp, ptype);
3479 #if __FreeBSD_version >= 800000
3480 sendmp->m_pkthdr.flowid = que->msix;
3481 sendmp->m_flags |= M_FLOWID;
3485 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3486 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3488 /* Advance our pointers to the next descriptor. */
3489 if (++i == adapter->num_rx_desc)
3492 /* Now send to the stack or do LRO */
3494 ixv_rx_input(rxr, ifp, sendmp, ptype);
3496 /* Every 8 descriptors we go to refresh mbufs */
3497 if (processed == 8) {
3498 ixv_refresh_mbufs(rxr, i);
3503 /* Refresh any remaining buf structs */
3504 if (ixv_rx_unrefreshed(rxr))
3505 ixv_refresh_mbufs(rxr, i);
3507 rxr->next_to_check = i;
3510 * Flush any outstanding LRO work
3512 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
3513 SLIST_REMOVE_HEAD(&lro->lro_active, next);
3514 tcp_lro_flush(lro, queued);
3520 ** We still have cleaning to do?
3521 ** Schedule another interrupt if so.
3523 if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
3524 ixv_rearm_queues(adapter, (u64)(1 << que->msix));
3532 /*********************************************************************
3534 * Verify that the hardware indicated that the checksum is valid.
3535 * Inform the stack about the status of checksum so that stack
3536 * doesn't spend time verifying the checksum.
3538 *********************************************************************/
/*
 * Translates hardware RX checksum status into mbuf csum_flags: IPCS
 * without IPE marks the IP header checked+valid; L4CS without TCPE marks
 * the L4 checksum data-valid (SCTP gets CSUM_SCTP_VALID on FreeBSD 8+).
 * staterr layout: low 16 bits = status, bits 24-31 = error flags.
 */
3540 ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
3542 u16 status = (u16) staterr;
3543 u8 errors = (u8) (staterr >> 24);
/* Identify SCTP so the valid-flag chosen below matches the protocol. */
3546 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3547 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
3550 if (status & IXGBE_RXD_STAT_IPCS) {
3551 if (!(errors & IXGBE_RXD_ERR_IPE)) {
3552 /* IP Checksum Good */
3553 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3554 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3557 mp->m_pkthdr.csum_flags = 0;
3559 if (status & IXGBE_RXD_STAT_L4CS) {
3560 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3561 #if __FreeBSD_version >= 800000
3563 type = CSUM_SCTP_VALID;
3565 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3566 mp->m_pkthdr.csum_flags |= type;
/* 0xffff tells the stack the pseudo-header sum is already complete. */
3568 mp->m_pkthdr.csum_data = htons(0xffff);
/*
 * Re-enables VLAN support after a soft reset: turns on VME (VLAN strip)
 * per RX queue and replays every VLAN id recorded in ixv_shadow_vfta
 * into the hardware VFTA via the PF mailbox (ixgbe_set_vfta), retrying
 * on mailbox contention. No-op when no VLANs are registered.
 */
3575 ixv_setup_vlan_support(struct adapter *adapter)
3577 struct ixgbe_hw *hw = &adapter->hw;
3578 u32 ctrl, vid, vfta, retry;
3582 ** We get here thru init_locked, meaning
3583 ** a soft reset, this has already cleared
3584 ** the VFTA and other state, so if there
3585 ** have been no vlan's registered do nothing.
3587 if (adapter->num_vlans == 0)
3590 /* Enable the queues */
3591 for (int i = 0; i < adapter->num_queues; i++) {
3592 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3593 ctrl |= IXGBE_RXDCTL_VME;
3594 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
3598 ** A soft reset zero's out the VFTA, so
3599 ** we need to repopulate it now.
3601 for (int i = 0; i < VFTA_SIZE; i++) {
3602 if (ixv_shadow_vfta[i] == 0)
3604 vfta = ixv_shadow_vfta[i];
3606 ** Reconstruct the vlan id's
3607 ** based on the bits set in each
3608 ** of the array ints.
3610 for ( int j = 0; j < 32; j++) {
3612 if ((vfta & (1 << j)) == 0)
/* The VF must ask the PF (via mailbox) to set the filter; loop retries. */
3615 /* Call the shared code mailbox routine */
3616 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
3625 ** This routine is run via an vlan config EVENT,
3626 ** it enables us to use the HW Filter table since
3627 ** we can get the vlan id. This just creates the
3628 ** entry in the soft version of the VFTA, init will
3629 ** repopulate the real table.
/*
 * VLAN-attach event handler. Validates the tag (1-4095), sets the
 * matching bit in the soft shadow VFTA (index = vtag >> 5, bit = low 5
 * bits), bumps num_vlans, and re-inits the adapter so the hardware
 * table is rebuilt. Ignores events for other interfaces.
 */
3632 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3634 struct adapter *adapter = ifp->if_softc;
3637 if (ifp->if_softc != arg) /* Not our event */
3640 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3643 IXV_CORE_LOCK(adapter);
3644 index = (vtag >> 5) & 0x7F;
3646 ixv_shadow_vfta[index] |= (1 << bit);
3647 ++adapter->num_vlans;
3648 /* Re-init to load the changes */
3649 ixv_init_locked(adapter);
3650 IXV_CORE_UNLOCK(adapter);
3654 ** This routine is run via an vlan
3655 ** unconfig EVENT, remove our entry
3656 ** in the soft vfta.
/*
 * VLAN-detach event handler: mirror of ixv_register_vlan. Clears the
 * shadow-VFTA bit for the tag, decrements num_vlans, and re-inits the
 * adapter to push the change into hardware.
 */
3659 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3661 struct adapter *adapter = ifp->if_softc;
3664 if (ifp->if_softc != arg)
3667 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3670 IXV_CORE_LOCK(adapter);
3671 index = (vtag >> 5) & 0x7F;
3673 ixv_shadow_vfta[index] &= ~(1 << bit);
3674 --adapter->num_vlans;
3675 /* Re-init to load the changes */
3676 ixv_init_locked(adapter);
3677 IXV_CORE_UNLOCK(adapter);
/*
 * Unmasks VF interrupts: sets VTEIMS, programs VTEIAC for auto-clear of
 * everything except OTHER/LSC, enables each queue's MSI-X vector, and
 * flushes the register writes.
 */
3681 ixv_enable_intr(struct adapter *adapter)
3683 struct ixgbe_hw *hw = &adapter->hw;
3684 struct ix_queue *que = adapter->queues;
3685 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3688 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
3690 mask = IXGBE_EIMS_ENABLE_MASK;
3691 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3692 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
3694 for (int i = 0; i < adapter->num_queues; i++, que++)
3695 ixv_enable_queue(adapter, que->msix);
3697 IXGBE_WRITE_FLUSH(hw);
/*
 * Masks all VF interrupts: clears the auto-clear register (VTEIAC),
 * sets every bit in the interrupt mask-clear register (VTEIMC), and
 * flushes so the writes take effect before returning.
 */
3703 ixv_disable_intr(struct adapter *adapter)
3705 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
3706 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
3707 IXGBE_WRITE_FLUSH(&adapter->hw);
3712 ** Setup the correct IVAR register for a particular MSIX interrupt
3713 ** - entry is the register array entry
3714 ** - vector is the MSIX vector for this queue
3715 ** - type is RX/TX/MISC
/*
 * Writes 'vector' (with the ALLOC_VAL valid bit) into the IVAR byte
 * selected by (entry, type): type == -1 targets the misc/mailbox IVAR;
 * otherwise each VTIVAR register packs two queue entries, with byte
 * offset 16*(entry&1) + 8*type (0 = RX, 1 = TX).
 */
3718 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3720 struct ixgbe_hw *hw = &adapter->hw;
3723 vector |= IXGBE_IVAR_ALLOC_VAL;
3725 if (type == -1) { /* MISC IVAR */
3726 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
3729 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
3730 } else { /* RX/TX IVARS */
3731 index = (16 * (entry & 1)) + (8 * type);
3732 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
3733 ivar &= ~(0xFF << index);
3734 ivar |= (vector << index);
3735 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
/*
 * Maps every queue's MSI-X vector to both its RX (type 0) and TX
 * (type 1) IVAR slots, seeds each vector's EITR with the default
 * moderation value, and routes the mailbox/link interrupt to mbxvec.
 */
3740 ixv_configure_ivars(struct adapter *adapter)
3742 struct ix_queue *que = adapter->queues;
3744 for (int i = 0; i < adapter->num_queues; i++, que++) {
3745 /* First the RX queue entry */
3746 ixv_set_ivar(adapter, i, que->msix, 0);
3747 /* ... and the TX */
3748 ixv_set_ivar(adapter, i, que->msix, 1);
3749 /* Set an initial value in EITR */
3750 IXGBE_WRITE_REG(&adapter->hw,
3751 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
3754 /* For the Link interrupt */
3755 ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
3760 ** Tasklet handler for MSIX MBX interrupts
3761 ** - do outside interrupt since it might sleep
/*
 * Taskqueue handler for the PF mailbox interrupt: queries current link
 * speed/state through the shared code and propagates the result to the
 * stack. Runs in task context because the mailbox exchange may sleep.
 */
3764 ixv_handle_mbx(void *context, int pending)
3766 struct adapter *adapter = context;
3768 ixgbe_check_link(&adapter->hw,
3769 &adapter->link_speed, &adapter->link_up, 0);
3770 ixv_update_link_status(adapter);
3774 ** The VF stats registers never have a truely virgin
3775 ** starting point, so this routine tries to make an
3776 ** artificial one, marking ground zero on attach as
/*
 * Folds the delta since the recorded base into the saved_reset_*
 * accumulators for each VF counter (good packets/octets rx/tx,
 * multicast rx), so statistics survive a device reset. Skipped when no
 * traffic has been counted yet.
 */
3780 ixv_save_stats(struct adapter *adapter)
3782 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
3783 adapter->stats.saved_reset_vfgprc +=
3784 adapter->stats.vfgprc - adapter->stats.base_vfgprc;
3785 adapter->stats.saved_reset_vfgptc +=
3786 adapter->stats.vfgptc - adapter->stats.base_vfgptc;
3787 adapter->stats.saved_reset_vfgorc +=
3788 adapter->stats.vfgorc - adapter->stats.base_vfgorc;
3789 adapter->stats.saved_reset_vfgotc +=
3790 adapter->stats.vfgotc - adapter->stats.base_vfgotc;
3791 adapter->stats.saved_reset_vfmprc +=
3792 adapter->stats.vfmprc - adapter->stats.base_vfmprc;
/*
 * Snapshots the current hardware counter values into the last_* fields
 * (octet counters are 36-bit, assembled from LSB/MSB register pairs)
 * and copies them to base_*, establishing the "ground zero" reference
 * that ixv_save_stats()/ixv_update_stats() measure against.
 */
3797 ixv_init_stats(struct adapter *adapter)
3799 struct ixgbe_hw *hw = &adapter->hw;
3801 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
3802 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
3803 adapter->stats.last_vfgorc |=
3804 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
3806 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
3807 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
3808 adapter->stats.last_vfgotc |=
3809 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
3811 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
3813 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
3814 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
3815 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
3816 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
3817 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
/*
 * Wrap-aware counter accumulation helpers. The hardware counters are
 * 32-bit (UPDATE_STAT_32) or 36-bit split across LSB/MSB registers
 * (UPDATE_STAT_36); when the current reading is below 'last' the
 * counter wrapped, so the appropriate modulus is added before folding
 * the new reading into 'count'.
 * NOTE(review): the closing lines of both macros (last = current;
 * count |= current; } while (0)) are missing from this extraction.
 */
3820 #define UPDATE_STAT_32(reg, last, count) \
3822 u32 current = IXGBE_READ_REG(hw, reg); \
3823 if (current < last) \
3824 count += 0x100000000LL; \
3826 count &= 0xFFFFFFFF00000000LL; \
3830 #define UPDATE_STAT_36(lsb, msb, last, count) \
3832 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
3833 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
3834 u64 current = ((cur_msb << 32) | cur_lsb); \
3835 if (current < last) \
3836 count += 0x1000000000LL; \
3838 count &= 0xFFFFFFF000000000LL; \
3843 ** ixv_update_stats - Update the board statistics counters.
/*
 * Refreshes the five VF counters from hardware using the wrap-aware
 * UPDATE_STAT_32/36 macros above: good packets rx/tx (32-bit), good
 * octets rx/tx (36-bit LSB/MSB pairs), and multicast packets rx.
 */
3846 ixv_update_stats(struct adapter *adapter)
3848 struct ixgbe_hw *hw = &adapter->hw;
3850 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3851 adapter->stats.vfgprc);
3852 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3853 adapter->stats.vfgptc);
3854 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3855 adapter->stats.last_vfgorc, adapter->stats.vfgorc);
3856 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3857 adapter->stats.last_vfgotc, adapter->stats.vfgotc);
3858 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3859 adapter->stats.vfmprc);
3862 /**********************************************************************
3864 * This routine is called only when ixgbe_display_debug_stats is enabled.
3865 * This routine provides a way to take a look at important statistics
3866 * maintained by the driver and hardware.
3868 **********************************************************************/
3870 ixv_print_hw_stats(struct adapter * adapter)
3872 device_t dev = adapter->dev;
3874 device_printf(dev,"Std Mbuf Failed = %lu\n",
3875 adapter->mbuf_defrag_failed);
3876 device_printf(dev,"Driver dropped packets = %lu\n",
3877 adapter->dropped_pkts);
3878 device_printf(dev, "watchdog timeouts = %ld\n",
3879 adapter->watchdog_events);
3881 device_printf(dev,"Good Packets Rcvd = %llu\n",
3882 (long long)adapter->stats.vfgprc);
3883 device_printf(dev,"Good Packets Xmtd = %llu\n",
3884 (long long)adapter->stats.vfgptc);
3885 device_printf(dev,"TSO Transmissions = %lu\n",
3890 /**********************************************************************
3892 * This routine is called only when em_display_debug_stats is enabled.
3893 * This routine provides a way to take a look at important statistics
3894 * maintained by the driver and hardware.
3896 **********************************************************************/
3898 ixv_print_debug_info(struct adapter *adapter)
3900 device_t dev = adapter->dev;
3901 struct ixgbe_hw *hw = &adapter->hw;
3902 struct ix_queue *que = adapter->queues;
3903 struct rx_ring *rxr;
3904 struct tx_ring *txr;
3905 struct lro_ctrl *lro;
3907 device_printf(dev,"Error Byte Count = %u \n",
3908 IXGBE_READ_REG(hw, IXGBE_ERRBC));
3910 for (int i = 0; i < adapter->num_queues; i++, que++) {
3914 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
3915 que->msix, (long)que->irqs);
3916 device_printf(dev,"RX(%d) Packets Received: %lld\n",
3917 rxr->me, (long long)rxr->rx_packets);
3918 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
3919 rxr->me, (long long)rxr->rx_split_packets);
3920 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
3921 rxr->me, (long)rxr->rx_bytes);
3922 device_printf(dev,"RX(%d) LRO Queued= %d\n",
3923 rxr->me, lro->lro_queued);
3924 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
3925 rxr->me, lro->lro_flushed);
3926 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
3927 txr->me, (long)txr->total_packets);
3928 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
3929 txr->me, (long)txr->no_desc_avail);
3932 device_printf(dev,"MBX IRQ Handled: %lu\n",
3933 (long)adapter->mbx_irq);
/*
** Sysctl handler: when a new value is written, dump the driver's
** hardware statistics via ixv_print_hw_stats(). Reads are a no-op
** beyond returning the handled int.
*/
ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
	struct adapter *adapter;

	/* NOTE(review): the declarations of 'error'/'result', the error
	 * return, and the surrounding braces are not visible in this
	 * view; confirm the exact control flow against upstream. */
	error = sysctl_handle_int(oidp, &result, 0, req);

	/* Nothing written (or an error) — do not trigger the dump */
	if (error || !req->newptr)

	adapter = (struct adapter *) arg1;
	ixv_print_hw_stats(adapter);
/*
** Sysctl handler: when a new value is written, dump the per-queue
** debug statistics via ixv_print_debug_info().
*/
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
	struct adapter *adapter;

	/* NOTE(review): declarations of 'error'/'result', the error
	 * return, and braces are not visible in this view; confirm the
	 * exact control flow against upstream. */
	error = sysctl_handle_int(oidp, &result, 0, req);

	/* Nothing written (or an error) — do not trigger the dump */
	if (error || !req->newptr)

	adapter = (struct adapter *) arg1;
	ixv_print_debug_info(adapter);
/*
** Set flow control using sysctl:
** Flow control values:
**	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
*/
ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
	struct adapter *adapter;

	/* The sysctl writes directly into the global ixv_flow_control */
	error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);

	adapter = (struct adapter *) arg1;
	/* NOTE(review): several switch labels and break statements
	 * (presumably case ixgbe_fc_full: and default:) are not visible
	 * in this view; confirm against upstream before relying on the
	 * exact mapping below. */
	switch (ixv_flow_control) {
		case ixgbe_fc_rx_pause:
		case ixgbe_fc_tx_pause:
			/* Valid pause mode requested — pass it through */
			adapter->hw.fc.requested_mode = ixv_flow_control;
			/* Any other value disables flow control */
			adapter->hw.fc.requested_mode = ixgbe_fc_none;

	/* Apply the requested mode to the hardware */
	ixgbe_fc_enable(&adapter->hw, 0);
/*
** Register a read/write integer sysctl node under the device's
** sysctl tree, exposing the RX "process limit" tunable.
**
** NOTE(review): an initializing assignment (presumably
** *limit = value;) is not visible in this view; confirm against
** upstream. Also, CTLTYPE_INT in the flags argument is redundant —
** SYSCTL_ADD_INT already fixes the node type; later FreeBSD
** revisions pass only CTLFLAG_RW here.
*/
ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
	const char *description, int *limit, int value)
	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);