/******************************************************************************

  Copyright (c) 2001-2012, Intel Corporation

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_inet6.h"
#endif
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.1.2";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixv_vendor_info_t ixv_vendor_info_array[] =
{
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
    /* required last entry */
    {0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
    "Intel(R) PRO/10GbE Virtual Function Network Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixv_probe(device_t);
static int      ixv_attach(device_t);
static int      ixv_detach(device_t);
static int      ixv_shutdown(device_t);
#if __FreeBSD_version < 800000
static void     ixv_start(struct ifnet *);
static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
#else
static int      ixv_mq_start(struct ifnet *, struct mbuf *);
static int      ixv_mq_start_locked(struct ifnet *,
                    struct tx_ring *, struct mbuf *);
static void     ixv_qflush(struct ifnet *);
#endif
static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixv_init(void *);
static void     ixv_init_locked(struct adapter *);
static void     ixv_stop(void *);
static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
static int      ixv_media_change(struct ifnet *);
static void     ixv_identify_hardware(struct adapter *);
static int      ixv_allocate_pci_resources(struct adapter *);
static int      ixv_allocate_msix(struct adapter *);
static int      ixv_allocate_queues(struct adapter *);
static int      ixv_setup_msix(struct adapter *);
static void     ixv_free_pci_resources(struct adapter *);
static void     ixv_local_timer(void *);
static void     ixv_setup_interface(device_t, struct adapter *);
static void     ixv_config_link(struct adapter *);

static int      ixv_allocate_transmit_buffers(struct tx_ring *);
static int      ixv_setup_transmit_structures(struct adapter *);
static void     ixv_setup_transmit_ring(struct tx_ring *);
static void     ixv_initialize_transmit_units(struct adapter *);
static void     ixv_free_transmit_structures(struct adapter *);
static void     ixv_free_transmit_buffers(struct tx_ring *);

static int      ixv_allocate_receive_buffers(struct rx_ring *);
static int      ixv_setup_receive_structures(struct adapter *);
static int      ixv_setup_receive_ring(struct rx_ring *);
static void     ixv_initialize_receive_units(struct adapter *);
static void     ixv_free_receive_structures(struct adapter *);
static void     ixv_free_receive_buffers(struct rx_ring *);

static void     ixv_enable_intr(struct adapter *);
static void     ixv_disable_intr(struct adapter *);
static bool     ixv_txeof(struct tx_ring *);
static bool     ixv_rxeof(struct ix_queue *, int);
static void     ixv_rx_checksum(u32, struct mbuf *, u32);
static void     ixv_set_multi(struct adapter *);
static void     ixv_update_link_status(struct adapter *);
static void     ixv_refresh_mbufs(struct rx_ring *, int);
static int      ixv_xmit(struct tx_ring *, struct mbuf **);
static int      ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int      ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixv_dma_malloc(struct adapter *, bus_size_t,
                    struct ixv_dma_alloc *, int);
static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
static void     ixv_add_rx_process_limit(struct adapter *, const char *,
                    const char *, int *, int);
static bool     ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool     ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
static void     ixv_configure_ivars(struct adapter *);
static u8 *     ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void     ixv_setup_vlan_support(struct adapter *);
static void     ixv_register_vlan(void *, struct ifnet *, u16);
static void     ixv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixv_save_stats(struct adapter *);
static void     ixv_init_stats(struct adapter *);
static void     ixv_update_stats(struct adapter *);

static __inline void ixv_rx_discard(struct rx_ring *, int);
static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
                    struct mbuf *, u32);
/* The MSI/X Interrupt handlers */
static void     ixv_msix_que(void *);
static void     ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void     ixv_handle_que(void *, int);
static void     ixv_handle_mbx(void *, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, ixv_probe),
    DEVMETHOD(device_attach, ixv_attach),
    DEVMETHOD(device_detach, ixv_detach),
    DEVMETHOD(device_shutdown, ixv_shutdown),
    {0, 0}
};
static driver_t ixv_driver = {
    "ix", ixv_methods, sizeof(struct adapter),
};
extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/*
** TUNABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation,
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector.
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
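
/*
 * Note: the AIM calculation itself lives in ixv_msix_que() below, which
 * rewrites VTEITR based on the average packet size seen on each vector
 * during the last interval.
 */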
/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts, so it is off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
/*
** Number of TX descriptors per ring,
** set higher than RX as this seems to be
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
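
/*
 * All of the hw.ixv.* tunables above are read at module load time and
 * can be set from /boot/loader.conf, for example:
 *
 *   hw.ixv.txd=2048
 *   hw.ixv.rxd=2048
 *   hw.ixv.enable_aim=1
 *
 * The values shown are only illustrative; descriptor counts must still
 * pass the sanity checks done in ixv_attach().
 */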
/*
** Shadow VFTA table; this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];
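
/*
 * The shadow copy is maintained by ixv_register_vlan() and
 * ixv_unregister_vlan(), and written back to the hardware VFTA by
 * ixv_setup_vlan_support() whenever the adapter is reinitialized.
 */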
/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  an adapter based on the PCI vendor/device ID of that adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/
static int
ixv_probe(device_t dev)
{
    ixv_vendor_info_t *ent;

    u16 pci_vendor_id = 0;
    u16 pci_device_id = 0;
    u16 pci_subvendor_id = 0;
    u16 pci_subdevice_id = 0;
    char adapter_name[256];

    INIT_DEBUGOUT("ixv_probe: begin");

    pci_vendor_id = pci_get_vendor(dev);
    if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
        return (ENXIO);

    pci_device_id = pci_get_device(dev);
    pci_subvendor_id = pci_get_subvendor(dev);
    pci_subdevice_id = pci_get_subdevice(dev);

    ent = ixv_vendor_info_array;
    while (ent->vendor_id != 0) {
        if ((pci_vendor_id == ent->vendor_id) &&
            (pci_device_id == ent->device_id) &&
            ((pci_subvendor_id == ent->subvendor_id) ||
             (ent->subvendor_id == 0)) &&
            ((pci_subdevice_id == ent->subdevice_id) ||
             (ent->subdevice_id == 0))) {
            sprintf(adapter_name, "%s, Version - %s",
                ixv_strings[ent->index],
                ixv_driver_version);
            device_set_desc_copy(dev, adapter_name);
            return (BUS_PROBE_DEFAULT);
        }
        ent++;
    }
    return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixv_attach(device_t dev)
{
    struct adapter  *adapter;
    struct ixgbe_hw *hw;
    int             error = 0;

    INIT_DEBUGOUT("ixv_attach: begin");

    if (resource_disabled("ixgbe", device_get_unit(dev))) {
        device_printf(dev, "Disabled by device hint\n");
        return (ENXIO);
    }

    /* Allocate, clear, and link in our adapter structure */
    adapter = device_get_softc(dev);
    adapter->dev = adapter->osdep.dev = dev;
    hw = &adapter->hw;

    IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_sysctl_stats, "I", "Statistics");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_set_flowcntl, "I", "Flow Control");

    SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_aim", CTLTYPE_INT | CTLFLAG_RW,
        &ixv_enable_aim, 1, "Interrupt Moderation");
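
    /*
     * Note: enable_aim is exposed both as a loader tunable
     * (hw.ixv.enable_aim) and as the read/write sysctl above, so
     * interrupt moderation can be toggled at runtime without a reload.
     */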
    /* Set up the timer callout */
    callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

    /* Determine hardware revision */
    ixv_identify_hardware(adapter);

    /* Do base PCI setup - map BAR0 */
    if (ixv_allocate_pci_resources(adapter)) {
        device_printf(dev, "Allocation of PCI resources failed\n");
        error = ENXIO;
        goto err_out;
    }

    /* Do descriptor calc and sanity checks */
    if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
        ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
        device_printf(dev, "TXD config issue, using default!\n");
        adapter->num_tx_desc = DEFAULT_TXD;
    } else
        adapter->num_tx_desc = ixv_txd;

    if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
        ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
        device_printf(dev, "RXD config issue, using default!\n");
        adapter->num_rx_desc = DEFAULT_RXD;
    } else
        adapter->num_rx_desc = ixv_rxd;

    /* Allocate our TX/RX Queues */
    if (ixv_allocate_queues(adapter)) {
        error = ENOMEM;
        goto err_out;
    }

    /*
    ** Initialize the shared code: it is at
    ** this point that the mac type is set.
    */
    error = ixgbe_init_shared_code(hw);
    if (error) {
        device_printf(dev, "Shared Code Initialization Failure\n");
        error = EIO;
        goto err_late;
    }

    /* Setup the mailbox */
    ixgbe_init_mbx_params_vf(hw);

    /* Get Hardware Flow Control setting */
    hw->fc.requested_mode = ixgbe_fc_full;
    hw->fc.pause_time = IXV_FC_PAUSE;
    hw->fc.low_water = IXV_FC_LO;
    hw->fc.high_water[0] = IXV_FC_HI;
    hw->fc.send_xon = TRUE;

    error = ixgbe_init_hw(hw);
    if (error) {
        device_printf(dev, "Hardware Initialization Failure\n");
        error = EIO;
        goto err_late;
    }

    error = ixv_allocate_msix(adapter);
    if (error)
        goto err_late;

    /* Setup OS specific network interface */
    ixv_setup_interface(dev, adapter);

    /* Sysctl for limiting the amount of work done in the taskqueue */
    ixv_add_rx_process_limit(adapter, "rx_processing_limit",
        "max number of rx packets to process", &adapter->rx_process_limit,
        ixv_rx_process_limit);

    /* Do the stats setup */
    ixv_save_stats(adapter);
    ixv_init_stats(adapter);

    /* Register for VLAN events */
    adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
        ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
        ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

    INIT_DEBUGOUT("ixv_attach: end");
    return (0);

err_late:
    ixv_free_transmit_structures(adapter);
    ixv_free_receive_structures(adapter);
err_out:
    ixv_free_pci_resources(adapter);
    return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixv_detach(device_t dev)
{
    struct adapter  *adapter = device_get_softc(dev);
    struct ix_queue *que = adapter->queues;

    INIT_DEBUGOUT("ixv_detach: begin");

    /* Make sure VLANS are not using driver */
    if (adapter->ifp->if_vlantrunk != NULL) {
        device_printf(dev, "Vlan in use, detach first\n");
        return (EBUSY);
    }

    IXV_CORE_LOCK(adapter);
    ixv_stop(adapter);
    IXV_CORE_UNLOCK(adapter);

    for (int i = 0; i < adapter->num_queues; i++, que++) {
        if (que->tq) {
            taskqueue_drain(que->tq, &que->que_task);
            taskqueue_free(que->tq);
        }
    }

    /* Drain the Link queue */
    if (adapter->tq) {
        taskqueue_drain(adapter->tq, &adapter->mbx_task);
        taskqueue_free(adapter->tq);
    }

    /* Unregister VLAN events */
    if (adapter->vlan_attach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    if (adapter->vlan_detach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

    ether_ifdetach(adapter->ifp);
    callout_drain(&adapter->timer);
    ixv_free_pci_resources(adapter);
    bus_generic_detach(dev);
    if_free(adapter->ifp);

    ixv_free_transmit_structures(adapter);
    ixv_free_receive_structures(adapter);

    IXV_CORE_LOCK_DESTROY(adapter);
    return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
ixv_shutdown(device_t dev)
{
    struct adapter *adapter = device_get_softc(dev);

    IXV_CORE_LOCK(adapter);
    ixv_stop(adapter);
    IXV_CORE_UNLOCK(adapter);
    return (0);
}
#if __FreeBSD_version < 800000
/*********************************************************************
 *  Transmit entry point
 *
 *  ixv_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/
static void
ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
{
    struct mbuf    *m_head;
    struct adapter *adapter = txr->adapter;

    IXV_TX_LOCK_ASSERT(txr);

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return;
    if (!adapter->link_active)
        return;

    while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        if (ixv_xmit(txr, &m_head)) {
            if (m_head == NULL)
                break;
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            break;
        }
        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, m_head);

        /* Set watchdog on */
        txr->watchdog_check = TRUE;
        txr->watchdog_time = ticks;
    }
    return;
}
/*
 * Legacy TX start - called by the stack; this
 * always uses the first tx ring and should
 * not be used with multiqueue tx enabled.
 */
static void
ixv_start(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct tx_ring *txr = adapter->tx_rings;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        IXV_TX_LOCK(txr);
        ixv_start_locked(txr, ifp);
        IXV_TX_UNLOCK(txr);
    }
    return;
}

#else
/*
** Multiqueue Transmit driver
*/
static int
ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
{
    struct adapter  *adapter = ifp->if_softc;
    struct ix_queue *que;
    struct tx_ring  *txr;
    int             i = 0, err = 0;

    /* Which queue to use */
    if ((m->m_flags & M_FLOWID) != 0)
        i = m->m_pkthdr.flowid % adapter->num_queues;
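
    /*
     * When the stack has tagged the mbuf with M_FLOWID, all packets of
     * a flow hash to the same ring, which preserves per-flow ordering;
     * untagged traffic simply falls back to queue 0.
     */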
    txr = &adapter->tx_rings[i];
    que = &adapter->queues[i];

    if (IXV_TX_TRYLOCK(txr)) {
        err = ixv_mq_start_locked(ifp, txr, m);
        IXV_TX_UNLOCK(txr);
    } else {
        err = drbr_enqueue(ifp, txr->br, m);
        taskqueue_enqueue(que->tq, &que->que_task);
    }

    return (err);
}
static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
    struct adapter *adapter = txr->adapter;
    struct mbuf    *next;
    int            enqueued, err = 0;

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING || adapter->link_active == 0) {
        if (m != NULL)
            err = drbr_enqueue(ifp, txr->br, m);
        return (err);
    }

    /* Do a clean if descriptors are low */
    if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
        ixv_txeof(txr);

    enqueued = 0;
    if (m == NULL) {
        next = drbr_dequeue(ifp, txr->br);
    } else if (drbr_needs_enqueue(ifp, txr->br)) {
        if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
            return (err);
        next = drbr_dequeue(ifp, txr->br);
    } else
        next = m;

    /* Process the queue */
    while (next != NULL) {
        if ((err = ixv_xmit(txr, &next)) != 0) {
            if (next != NULL)
                err = drbr_enqueue(ifp, txr->br, next);
            break;
        }
        enqueued++;
        drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, next);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
            break;
        if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }
        next = drbr_dequeue(ifp, txr->br);
    }

    if (enqueued > 0) {
        /* Set watchdog on */
        txr->watchdog_check = TRUE;
        txr->watchdog_time = ticks;
    }

    return (err);
}
/*
** Flush all ring buffers
*/
static void
ixv_qflush(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct tx_ring *txr = adapter->tx_rings;
    struct mbuf    *m;

    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        IXV_TX_LOCK(txr);
        while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
            m_freem(m);
        IXV_TX_UNLOCK(txr);
    }
    if_qflush(ifp);
}
#endif
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifreq   *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
    struct ifaddr  *ifa = (struct ifaddr *) data;
    bool           avoid_reset = FALSE;
#endif
    int            error = 0;

    switch (command) {

    case SIOCSIFADDR:
#ifdef INET
        if (ifa->ifa_addr->sa_family == AF_INET)
            avoid_reset = TRUE;
#endif
#ifdef INET6
        if (ifa->ifa_addr->sa_family == AF_INET6)
            avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
        /*
        ** Calling init results in link renegotiation,
        ** so we avoid doing it when possible.
        */
        if (avoid_reset) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                ixv_init(adapter);
            if (!(ifp->if_flags & IFF_NOARP))
                arp_ifinit(ifp, ifa);
        } else
            error = ether_ioctl(ifp, command, data);
        break;
#endif
    case SIOCSIFMTU:
        IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
        if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
            error = EINVAL;
        } else {
            IXV_CORE_LOCK(adapter);
            ifp->if_mtu = ifr->ifr_mtu;
            adapter->max_frame_size =
                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
            ixv_init_locked(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        break;
    case SIOCSIFFLAGS:
        IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
        IXV_CORE_LOCK(adapter);
        if (ifp->if_flags & IFF_UP) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                ixv_init_locked(adapter);
        } else
            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixv_stop(adapter);
        adapter->if_flags = ifp->if_flags;
        IXV_CORE_UNLOCK(adapter);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            IXV_CORE_LOCK(adapter);
            ixv_disable_intr(adapter);
            ixv_set_multi(adapter);
            ixv_enable_intr(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
        error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
        break;
    case SIOCSIFCAP:
    {
        int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
        if (mask & IFCAP_HWCSUM)
            ifp->if_capenable ^= IFCAP_HWCSUM;
        if (mask & IFCAP_TSO4)
            ifp->if_capenable ^= IFCAP_TSO4;
        if (mask & IFCAP_LRO)
            ifp->if_capenable ^= IFCAP_LRO;
        if (mask & IFCAP_VLAN_HWTAGGING)
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            IXV_CORE_LOCK(adapter);
            ixv_init_locked(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        VLAN_CAPABILITIES(ifp);
        break;
    }
    default:
        IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
        error = ether_ioctl(ifp, command, data);
        break;
    }

    return (error);
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
    struct ifnet    *ifp = adapter->ifp;
    device_t        dev = adapter->dev;
    struct ixgbe_hw *hw = &adapter->hw;
    u32             mhadd, gpie;

    INIT_DEBUGOUT("ixv_init: begin");
    mtx_assert(&adapter->core_mtx, MA_OWNED);
    hw->adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    /* Get the latest mac address, user can use a LAA */
    bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
        IXGBE_ETH_LENGTH_OF_ADDRESS);
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    hw->addr_ctrl.rar_used_count = 1;

    /* Prepare transmit descriptors and buffers */
    if (ixv_setup_transmit_structures(adapter)) {
        device_printf(dev, "Could not setup transmit structures\n");
        ixv_stop(adapter);
        return;
    }

    ixv_initialize_transmit_units(adapter);

    /* Setup Multicast table */
    ixv_set_multi(adapter);

    /*
    ** Determine the correct mbuf pool
    ** for doing jumbo/headersplit
    */
    if (ifp->if_mtu > ETHERMTU)
        adapter->rx_mbuf_sz = MJUMPAGESIZE;
    else
        adapter->rx_mbuf_sz = MCLBYTES;
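
    /*
     * The pool chosen above determines the cluster size handed to the
     * hardware: standard 2KB clusters (MCLBYTES) are enough for the
     * default 1500-byte MTU, while jumbo frames get page-sized clusters
     * (MJUMPAGESIZE, typically 4KB).
     */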
    /* Prepare receive descriptors and buffers */
    if (ixv_setup_receive_structures(adapter)) {
        device_printf(dev, "Could not setup receive structures\n");
        ixv_stop(adapter);
        return;
    }

    /* Configure RX settings */
    ixv_initialize_receive_units(adapter);

    /* Enable Enhanced MSIX mode */
    gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
    IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

    /* Set the various hardware offload abilities */
    ifp->if_hwassist = 0;
    if (ifp->if_capenable & IFCAP_TSO4)
        ifp->if_hwassist |= CSUM_TSO;
    if (ifp->if_capenable & IFCAP_TXCSUM) {
        ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
        ifp->if_hwassist |= CSUM_SCTP;
#endif
    }

    /* Set MTU size */
    if (ifp->if_mtu > ETHERMTU) {
        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        mhadd &= ~IXGBE_MHADD_MFS_MASK;
        mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    }

    /* Set up VLAN offload and filter */
    ixv_setup_vlan_support(adapter);

    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

    /* Set up MSI/X routing */
    ixv_configure_ivars(adapter);

    /* Set up auto-mask */
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

    /* Set moderation on the Link interrupt */
    IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

    /* Stats init */
    ixv_init_stats(adapter);

    /* Config/Enable Link */
    ixv_config_link(adapter);

    /* And now turn on interrupts */
    ixv_enable_intr(adapter);

    /* Now inform the stack we're ready */
    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    return;
}
static void
ixv_init(void *arg)
{
    struct adapter *adapter = arg;

    IXV_CORE_LOCK(adapter);
    ixv_init_locked(adapter);
    IXV_CORE_UNLOCK(adapter);
    return;
}
/*
** MSIX Interrupt Handlers and Tasklets
*/

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32             queue = 1 << vector;
    u32             mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u64             queue = (u64)1 << vector;
    u32             mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
    u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
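
/*
 * Writing VTEICS sets the interrupt cause bits directly, so any enabled
 * queue interrupt in the mask fires again immediately; the watchdog in
 * ixv_local_timer() uses this to kick queues that may have stalled.
 */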
static void
ixv_handle_que(void *context, int pending)
{
    struct ix_queue *que = context;
    struct adapter  *adapter = que->adapter;
    struct tx_ring  *txr = que->txr;
    struct ifnet    *ifp = adapter->ifp;
    bool            more;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        more = ixv_rxeof(que, adapter->rx_process_limit);
        IXV_TX_LOCK(txr);
        ixv_txeof(txr);
#if __FreeBSD_version >= 800000
        if (!drbr_empty(ifp, txr->br))
            ixv_mq_start_locked(ifp, txr, NULL);
#else
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
            ixv_start_locked(txr, ifp);
#endif
        IXV_TX_UNLOCK(txr);
        if (more) {
            taskqueue_enqueue(que->tq, &que->que_task);
            return;
        }
    }

    /* Reenable this interrupt */
    ixv_enable_queue(adapter, que->msix);
    return;
}
/*********************************************************************
 *
 *  MSI Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
    struct ix_queue *que = arg;
    struct adapter  *adapter = que->adapter;
    struct tx_ring  *txr = que->txr;
    struct rx_ring  *rxr = que->rxr;
    bool            more_tx, more_rx;
    u32             newitr = 0;

    ixv_disable_queue(adapter, que->msix);
    ++que->irqs;

    more_rx = ixv_rxeof(que, adapter->rx_process_limit);

    IXV_TX_LOCK(txr);
    more_tx = ixv_txeof(txr);
    /*
    ** Make certain that if the stack
    ** has anything queued the task gets
    ** scheduled to handle it.
    */
#if __FreeBSD_version < 800000
    if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
#else
    if (!drbr_empty(adapter->ifp, txr->br))
#endif
        more_tx = 1;
    IXV_TX_UNLOCK(txr);

    more_rx = ixv_rxeof(que, adapter->rx_process_limit);

    if (ixv_enable_aim == FALSE)
        goto no_calc;

    /*
    ** Do Adaptive Interrupt Moderation:
    **  - Write out last calculated setting
    **  - Calculate based on average size over
    **    the last interval.
    */
    if (que->eitr_setting)
        IXGBE_WRITE_REG(&adapter->hw,
            IXGBE_VTEITR(que->msix),
            que->eitr_setting);

    que->eitr_setting = 0;

    /* Idle, do nothing */
    if ((txr->bytes == 0) && (rxr->bytes == 0))
        goto no_calc;

    if ((txr->bytes) && (txr->packets))
        newitr = txr->bytes / txr->packets;
    if ((rxr->bytes) && (rxr->packets))
        newitr = max(newitr,
            (rxr->bytes / rxr->packets));
    newitr += 24; /* account for hardware frame, crc */

    /* set an upper boundary */
    newitr = min(newitr, 3000);

    /* Be nice to the mid range */
    if ((newitr > 300) && (newitr < 1200))
        newitr = (newitr / 3);
    else
        newitr = (newitr / 2);

    newitr |= newitr << 16;

    /* save for next interrupt */
    que->eitr_setting = newitr;
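
    /*
     * The value computed above is deliberately not written here: it is
     * flushed to VTEITR at the top of the next interrupt, so the
     * moderation rate always lags one interval behind the measured
     * average packet size.
     */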
    /* Reset state */
    txr->bytes = 0;
    txr->packets = 0;
    rxr->bytes = 0;
    rxr->packets = 0;

no_calc:
    if (more_tx || more_rx)
        taskqueue_enqueue(que->tq, &que->que_task);
    else /* Reenable this interrupt */
        ixv_enable_queue(adapter, que->msix);
    return;
}
static void
ixv_msix_mbx(void *arg)
{
    struct adapter  *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;
    u32             reg;

    ++adapter->mbx_irq;

    /* First get the cause */
    reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    /* Clear interrupt with write */
    IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

    /* Link status change */
    if (reg & IXGBE_EICR_LSC)
        taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
    struct adapter *adapter = ifp->if_softc;

    INIT_DEBUGOUT("ixv_media_status: begin");
    IXV_CORE_LOCK(adapter);
    ixv_update_link_status(adapter);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!adapter->link_active) {
        IXV_CORE_UNLOCK(adapter);
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;

    switch (adapter->link_speed) {
    case IXGBE_LINK_SPEED_1GB_FULL:
        ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_10GB_FULL:
        ifmr->ifm_active |= IFM_FDX;
        break;
    }

    IXV_CORE_UNLOCK(adapter);
    return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet * ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifmedia *ifm = &adapter->media;

    INIT_DEBUGOUT("ixv_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        break;
    default:
        device_printf(adapter->dev, "Only auto media type\n");
        return (EINVAL);
    }

    return (0);
}
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *  - return 0 on success, positive on failure
 *
 **********************************************************************/
static int
ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
    struct adapter    *adapter = txr->adapter;
    u32               olinfo_status = 0, cmd_type_len;
    u32               paylen = 0;
    int               i, j, error, nsegs;
    int               first, last = 0;
    struct mbuf       *m_head;
    bus_dma_segment_t segs[32];
    bus_dmamap_t      map;
    struct ixv_tx_buf *txbuf, *txbuf_mapped;
    union ixgbe_adv_tx_desc *txd = NULL;

    m_head = *m_headp;

    /* Basic descriptor defines */
    cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
        IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

    if (m_head->m_flags & M_VLANTAG)
        cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

    /*
     * Important to capture the first descriptor
     * used because it will contain the index of
     * the one we tell the hardware to report back
     */
    first = txr->next_avail_desc;
    txbuf = &txr->tx_buffers[first];
    txbuf_mapped = txbuf;
    map = txbuf->map;

    /*
     * Map the packet for DMA.
     */
    error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
        *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

    if (error == EFBIG) {
        struct mbuf *m;

        m = m_defrag(*m_headp, M_DONTWAIT);
        if (m == NULL) {
            adapter->mbuf_defrag_failed++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (ENOBUFS);
        }
        *m_headp = m;

        /* Try it again */
        error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
            *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

        if (error == ENOMEM) {
            adapter->no_tx_dma_setup++;
            return (error);
        } else if (error != 0) {
            adapter->no_tx_dma_setup++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (error);
        }
    } else if (error == ENOMEM) {
        adapter->no_tx_dma_setup++;
        return (error);
    } else if (error != 0) {
        adapter->no_tx_dma_setup++;
        m_freem(*m_headp);
        *m_headp = NULL;
        return (error);
    }

    /* Make certain there are enough descriptors */
    if (nsegs > txr->tx_avail - 2) {
        txr->no_desc_avail++;
        error = ENOBUFS;
        goto xmit_fail;
    }
    m_head = *m_headp;

    /*
    ** Set up the appropriate offload context:
    ** this becomes the first descriptor of
    ** a packet.
    */
    if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
        if (ixv_tso_setup(txr, m_head, &paylen)) {
            cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
            olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
            olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
            olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
        } else
            return (ENXIO);
    } else if (ixv_tx_ctx_setup(txr, m_head))
        olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

    /* Record payload length */
    if (paylen == 0)
        olinfo_status |= m_head->m_pkthdr.len <<
            IXGBE_ADVTXD_PAYLEN_SHIFT;

    i = txr->next_avail_desc;
    for (j = 0; j < nsegs; j++) {
        bus_size_t seglen;
        bus_addr_t segaddr;

        txbuf = &txr->tx_buffers[i];
        txd = &txr->tx_base[i];
        seglen = segs[j].ds_len;
        segaddr = htole64(segs[j].ds_addr);

        txd->read.buffer_addr = segaddr;
        txd->read.cmd_type_len = htole32(txr->txd_cmd |
            cmd_type_len | seglen);
        txd->read.olinfo_status = htole32(olinfo_status);
        last = i; /* descriptor that will get completion IRQ */

        if (++i == adapter->num_tx_desc)
            i = 0;

        txbuf->m_head = NULL;
        txbuf->eop_index = -1;
    }

    txd->read.cmd_type_len |=
        htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
    txr->tx_avail -= nsegs;
    txr->next_avail_desc = i;

    txbuf->m_head = m_head;
    txr->tx_buffers[first].map = txbuf->map;
    txbuf->map = map;
    bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

    /* Set the index of the descriptor that will be marked done */
    txbuf = &txr->tx_buffers[first];
    txbuf->eop_index = last;

    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    /*
     * Advance the Transmit Descriptor Tail (Tdt); this tells the
     * hardware that this frame is available to transmit.
     */
    ++txr->total_packets;
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);

    return (0);

xmit_fail:
    bus_dmamap_unload(txr->txtag, txbuf->map);
    return (error);
}
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
    u8     mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
    u8     *update_ptr;
    struct ifmultiaddr *ifma;
    int    mcnt = 0;
    struct ifnet *ifp = adapter->ifp;

    IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
    IF_ADDR_LOCK(ifp);
#else
    if_maddr_rlock(ifp);
#endif
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
            &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
            IXGBE_ETH_LENGTH_OF_ADDRESS);
        mcnt++;
    }
#if __FreeBSD_version < 800000
    IF_ADDR_UNLOCK(ifp);
#else
    if_maddr_runlock(ifp);
#endif

    update_ptr = mta;

    ixgbe_update_mc_addr_list(&adapter->hw,
        update_ptr, mcnt, ixv_mc_array_itr, TRUE);

    return;
}
/*
 * This is an iterator function required by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
    u8 *addr = *update_ptr;
    u8 *newptr;

    *vmdq = 0;

    newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
    *update_ptr = newptr;
    return addr;
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/
static void
ixv_local_timer(void *arg)
{
    struct adapter *adapter = arg;
    device_t       dev = adapter->dev;
    struct tx_ring *txr = adapter->tx_rings;
    int            i;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    ixv_update_link_status(adapter);

    /* Stats Update */
    ixv_update_stats(adapter);

    /*
     * If the interface has been paused
     * then don't do the watchdog check
     */
    if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
        goto out;

    /*
    ** Check for time since any descriptor was cleaned
    */
    for (i = 0; i < adapter->num_queues; i++, txr++) {
        IXV_TX_LOCK(txr);
        if (txr->watchdog_check == FALSE) {
            IXV_TX_UNLOCK(txr);
            continue;
        }
        if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
            goto hung;
        IXV_TX_UNLOCK(txr);
    }
out:
    ixv_rearm_queues(adapter, adapter->que_mask);
    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    return;

hung:
    device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
    device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
        IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
        IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
    device_printf(dev, "TX(%d) desc avail = %d, "
        "Next TX to Clean = %d\n",
        txr->me, txr->tx_avail, txr->next_to_clean);
    adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    adapter->watchdog_events++;
    IXV_TX_UNLOCK(txr);
    ixv_init_locked(adapter);
}
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
    struct ifnet   *ifp = adapter->ifp;
    struct tx_ring *txr = adapter->tx_rings;
    device_t       dev = adapter->dev;

    if (adapter->link_up) {
        if (adapter->link_active == FALSE) {
            if (bootverbose)
                device_printf(dev, "Link is up %d Gbps %s \n",
                    ((adapter->link_speed == 128) ? 10 : 1),
                    "Full Duplex");
            adapter->link_active = TRUE;
            if_link_state_change(ifp, LINK_STATE_UP);
        }
    } else { /* Link down */
        if (adapter->link_active == TRUE) {
            if (bootverbose)
                device_printf(dev, "Link is Down\n");
            if_link_state_change(ifp, LINK_STATE_DOWN);
            adapter->link_active = FALSE;
            for (int i = 0; i < adapter->num_queues;
                i++, txr++)
                txr->watchdog_check = FALSE;
        }
    }

    return;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/
static void
ixv_stop(void *arg)
{
    struct ifnet    *ifp;
    struct adapter  *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;

    ifp = adapter->ifp;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    INIT_DEBUGOUT("ixv_stop: begin\n");
    ixv_disable_intr(adapter);

    /* Tell the stack that the interface is no longer active */
    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

    adapter->hw.adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    return;
}
/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    u16      pci_cmd_word;

    /*
    ** Make sure BUSMASTER is set; on a VM under
    ** KVM it may not be, and that will break things.
    */
    pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
    if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
        (pci_cmd_word & PCIM_CMD_MEMEN))) {
        INIT_DEBUGOUT("Memory Access and/or Bus Master "
            "bits were not set!\n");
        pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
        pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
    }

    /* Save off the information about this board */
    adapter->hw.vendor_id = pci_get_vendor(dev);
    adapter->hw.device_id = pci_get_device(dev);
    adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
    adapter->hw.subsystem_vendor_id =
        pci_read_config(dev, PCIR_SUBVEND_0, 2);
    adapter->hw.subsystem_device_id =
        pci_read_config(dev, PCIR_SUBDEV_0, 2);

    return;
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
    device_t        dev = adapter->dev;
    struct ix_queue *que = adapter->queues;
    int             error, rid, vector = 0;

    for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
        rid = vector + 1;
        que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (que->res == NULL) {
            device_printf(dev, "Unable to allocate"
                " bus resource: que interrupt [%d]\n", vector);
            return (ENXIO);
        }
        /* Set the handler function */
        error = bus_setup_intr(dev, que->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixv_msix_que, que, &que->tag);
        if (error) {
            que->res = NULL;
            device_printf(dev, "Failed to register QUE handler");
            return (error);
        }
#if __FreeBSD_version >= 800504
        bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
        que->msix = vector;
        adapter->que_mask |= (u64)1 << que->msix;
        /*
        ** Bind the msix vector, and thus the
        ** ring, to the corresponding cpu.
        */
        if (adapter->num_queues > 1)
            bus_bind_intr(dev, que->res, i);

        TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
        que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
        taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
            device_get_nameunit(adapter->dev));
    }

    /* And now the mailbox/link interrupt */
    rid = vector + 1;
    adapter->res = bus_alloc_resource_any(dev,
        SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
    if (!adapter->res) {
        device_printf(dev, "Unable to allocate"
            " bus resource: MBX interrupt [%d]\n", rid);
        return (ENXIO);
    }
    /* Set the mbx handler function */
    error = bus_setup_intr(dev, adapter->res,
        INTR_TYPE_NET | INTR_MPSAFE, NULL,
        ixv_msix_mbx, adapter, &adapter->tag);
    if (error) {
        adapter->res = NULL;
        device_printf(dev, "Failed to register LINK handler");
        return (error);
    }
#if __FreeBSD_version >= 800504
    bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
    adapter->mbxvec = vector;
    /* Tasklets for Mailbox */
    TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
    adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
        taskqueue_thread_enqueue, &adapter->tq);
    taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
        device_get_nameunit(adapter->dev));

    /*
    ** Due to a broken design QEMU will fail to properly
    ** enable the guest for MSIX unless the vectors in
    ** the table are all set up, so we must rewrite the
    ** ENABLE bit in the MSIX control register again at this
    ** point to cause it to successfully initialize us.
    */
    if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
        int msix_ctrl;

        pci_find_cap(dev, PCIY_MSIX, &rid);
        rid += PCIR_MSIX_CTRL;
        msix_ctrl = pci_read_config(dev, rid, 2);
        msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
        pci_write_config(dev, rid, msix_ctrl, 2);
    }

    return (0);
}
/*
 * Setup MSIX resources; note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    int      rid, vectors, want = 2;

    /* First try MSI/X */
    rid = PCIR_BAR(MSIX_BAR);
    adapter->msix_mem = bus_alloc_resource_any(dev,
        SYS_RES_MEMORY, &rid, RF_ACTIVE);
    if (!adapter->msix_mem) {
        device_printf(adapter->dev,
            "Unable to map MSIX table\n");
        return (ENXIO);
    }

    vectors = pci_msix_count(dev);
    if (vectors < 2) {
        bus_release_resource(dev, SYS_RES_MEMORY,
            rid, adapter->msix_mem);
        adapter->msix_mem = NULL;
        return (ENXIO);
    }

    /*
    ** Want two vectors: one for a queue,
    ** plus an additional for the mailbox.
    */
    if (pci_alloc_msix(dev, &want) == 0) {
        device_printf(adapter->dev,
            "Using MSIX interrupts with %d vectors\n", want);
        return (want);
    }

    device_printf(adapter->dev, "MSIX config error\n");
    return (ENXIO);
}
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
    int      rid;
    device_t dev = adapter->dev;

    rid = PCIR_BAR(0);
    adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);

    if (!(adapter->pci_mem)) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    adapter->osdep.mem_bus_space_tag =
        rman_get_bustag(adapter->pci_mem);
    adapter->osdep.mem_bus_space_handle =
        rman_get_bushandle(adapter->pci_mem);
    adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

    adapter->num_queues = 1;
    adapter->hw.back = &adapter->osdep;

    /*
    ** Now set up MSI/X; this should
    ** return the number of
    ** configured vectors.
    */
    adapter->msix = ixv_setup_msix(adapter);
    if (adapter->msix == ENXIO)
        return (ENXIO);
    else
        return (0);
}
static void
ixv_free_pci_resources(struct adapter * adapter)
{
    struct ix_queue *que = adapter->queues;
    device_t        dev = adapter->dev;
    int             rid, memrid;

    memrid = PCIR_BAR(MSIX_BAR);

    /*
    ** There is a slight possibility of a failure mode
    ** in attach that will result in entering this function
    ** before interrupt resources have been initialized, and
    ** in that case we do not want to execute the loops below.
    ** We can detect this reliably by the state of the adapter
    ** res pointer.
    */
    if (adapter->res == NULL)
        goto mem;

    /*
    **  Release all msix queue resources:
    */
    for (int i = 0; i < adapter->num_queues; i++, que++) {
        rid = que->msix + 1;
        if (que->tag != NULL) {
            bus_teardown_intr(dev, que->res, que->tag);
            que->tag = NULL;
        }
        if (que->res != NULL)
            bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
    }

    /* Clean the Legacy or Link interrupt last */
    if (adapter->mbxvec) /* we are doing MSIX */
        rid = adapter->mbxvec + 1;
    else
        (adapter->msix != 0) ? (rid = 1) : (rid = 0);

    if (adapter->tag != NULL) {
        bus_teardown_intr(dev, adapter->res, adapter->tag);
        adapter->tag = NULL;
    }
    if (adapter->res != NULL)
        bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
    if (adapter->msix)
        pci_release_msi(dev);

    if (adapter->msix_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            memrid, adapter->msix_mem);

    if (adapter->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            PCIR_BAR(0), adapter->pci_mem);

    return;
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
    struct ifnet *ifp;

    INIT_DEBUGOUT("ixv_setup_interface: begin");

    ifp = adapter->ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL)
        panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_baudrate = 1000000000;
    ifp->if_init = ixv_init;
    ifp->if_softc = adapter;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
    ifp->if_transmit = ixv_mq_start;
    ifp->if_qflush = ixv_qflush;
#else
    ifp->if_start = ixv_start;
#endif
    ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

    ether_ifattach(ifp, adapter->hw.mac.addr);

    adapter->max_frame_size =
        ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    /*
     * Tell the upper layer(s) we support long frames.
     */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

    ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
    ifp->if_capabilities |= IFCAP_JUMBO_MTU;
    ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
                         |  IFCAP_VLAN_HWCSUM
                         |  IFCAP_VLAN_MTU;
    ifp->if_capenable = ifp->if_capabilities;

    /* Don't enable LRO by default */
    ifp->if_capabilities |= IFCAP_LRO;

    /*
     * Specify the media types supported by this adapter and register
     * callbacks to update media and link information
     */
    ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
        ixv_media_status);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

    return;
}
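
/*
 * Note on the media table above: only IFM_ETHER | IFM_AUTO is usable in
 * practice, since ixv_media_change() rejects every other subtype; a VF
 * has no control over the physical link, which is owned by the PF.
 */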
static void
ixv_config_link(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32  autoneg, err = 0;
    bool negotiate = TRUE;

    if (hw->mac.ops.check_link)
        err = hw->mac.ops.check_link(hw, &autoneg,
            &adapter->link_up, FALSE);
    if (err)
        goto out;

    if (hw->mac.ops.setup_link)
        err = hw->mac.ops.setup_link(hw, autoneg,
            negotiate, adapter->link_up);
out:
    return;
}
/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
    if (error)
        return;
    *(bus_addr_t *) arg = segs->ds_addr;
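    /*
     * The DMA tag used with this callback is created with a single
     * segment (nsegments = 1), so stashing segs[0].ds_addr is enough
     * to hand the caller the bus address of the whole allocation.
     */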
    return;
}

static int
ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
        struct ixv_dma_alloc *dma, int mapflags)
{
    device_t dev = adapter->dev;
    int      r;

    r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
            DBA_ALIGN, 0,           /* alignment, bounds */
            BUS_SPACE_MAXADDR,      /* lowaddr */
            BUS_SPACE_MAXADDR,      /* highaddr */
            NULL, NULL,             /* filter, filterarg */
            size,                   /* maxsize */
            1,                      /* nsegments */
            size,                   /* maxsegsize */
            BUS_DMA_ALLOCNOW,       /* flags */
            NULL,                   /* lockfunc */
            NULL,                   /* lockfuncarg */
            &dma->dma_tag);
    if (r != 0) {
        device_printf(dev, "ixv_dma_malloc: bus_dma_tag_create failed; "
            "error %u\n", r);
        goto fail_0;
    }
    r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
        BUS_DMA_NOWAIT, &dma->dma_map);
    if (r != 0) {
        device_printf(dev, "ixv_dma_malloc: bus_dmamem_alloc failed; "
            "error %u\n", r);
        goto fail_1;
    }
    r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
        size,
        ixv_dmamap_cb,
        &dma->dma_paddr,
        mapflags | BUS_DMA_NOWAIT);
    if (r != 0) {
        device_printf(dev, "ixv_dma_malloc: bus_dmamap_load failed; "
            "error %u\n", r);
        goto fail_2;
    }
    dma->dma_size = size;
    return (0);
fail_2:
    bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
    bus_dma_tag_destroy(dma->dma_tag);
fail_0:
    dma->dma_map = NULL;
    dma->dma_tag = NULL;
    return (r);
}
static void
ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
{
    bus_dmamap_sync(dma->dma_tag, dma->dma_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(dma->dma_tag, dma->dma_map);
    bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
    bus_dma_tag_destroy(dma->dma_tag);
}
/*********************************************************************
 *
 *  Allocate memory for the transmit and receive rings, and then
 *  the descriptors associated with each; called only once at attach.
 *
 **********************************************************************/
static int
ixv_allocate_queues(struct adapter *adapter)
{
    device_t        dev = adapter->dev;
    struct ix_queue *que;
    struct tx_ring  *txr;
    struct rx_ring  *rxr;
    int rsize, tsize, error = 0;
    int txconf = 0, rxconf = 0;

    /* First allocate the top level queue structs */
    if (!(adapter->queues =
        (struct ix_queue *) malloc(sizeof(struct ix_queue) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate queue memory\n");
        error = ENOMEM;
        goto fail;
    }

    /* First allocate the TX ring struct memory */
    if (!(adapter->tx_rings =
        (struct tx_ring *) malloc(sizeof(struct tx_ring) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate TX ring memory\n");
        error = ENOMEM;
        goto tx_fail;
    }

    /* Next allocate the RX */
    if (!(adapter->rx_rings =
        (struct rx_ring *) malloc(sizeof(struct rx_ring) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate RX ring memory\n");
        error = ENOMEM;
        goto rx_fail;
    }

    /* For the ring itself */
    tsize = roundup2(adapter->num_tx_desc *
        sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);

    /*
     * Now set up the TX queues; txconf is needed to handle the
     * possibility that things fail midcourse so we can undo the
     * memory gracefully
     */
    for (int i = 0; i < adapter->num_queues; i++, txconf++) {
        /* Set up some basics */
        txr = &adapter->tx_rings[i];
        txr->adapter = adapter;
        txr->me = i;

        /* Initialize the TX side lock */
        snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
            device_get_nameunit(dev), txr->me);
        mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);

        if (ixv_dma_malloc(adapter, tsize,
            &txr->txdma, BUS_DMA_NOWAIT)) {
            device_printf(dev,
                "Unable to allocate TX Descriptor memory\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
        txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
        bzero((void *)txr->tx_base, tsize);

        /* Now allocate transmit buffers for the ring */
        if (ixv_allocate_transmit_buffers(txr)) {
            device_printf(dev,
                "Critical Failure setting up transmit buffers\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
#if __FreeBSD_version >= 800000
        /* Allocate a buf ring */
        txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
            M_WAITOK, &txr->tx_mtx);
        if (txr->br == NULL) {
            device_printf(dev,
                "Critical Failure setting up buf ring\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
#endif
    }

    /*
     * Next the RX queues...
     */
    rsize = roundup2(adapter->num_rx_desc *
        sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
    for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
        rxr = &adapter->rx_rings[i];
        /* Set up some basics */
        rxr->adapter = adapter;
        rxr->me = i;

        /* Initialize the RX side lock */
        snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
            device_get_nameunit(dev), rxr->me);
        mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

        if (ixv_dma_malloc(adapter, rsize,
            &rxr->rxdma, BUS_DMA_NOWAIT)) {
            device_printf(dev,
                "Unable to allocate RxDescriptor memory\n");
            error = ENOMEM;
            goto err_rx_desc;
        }
        rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
        bzero((void *)rxr->rx_base, rsize);

        /* Allocate receive buffers for the ring */
        if (ixv_allocate_receive_buffers(rxr)) {
            device_printf(dev,
                "Critical Failure setting up receive buffers\n");
            error = ENOMEM;
            goto err_rx_desc;
        }
    }

    /*
    ** Finally set up the queue holding structs
    */
    for (int i = 0; i < adapter->num_queues; i++) {
        que = &adapter->queues[i];
        que->adapter = adapter;
        que->txr = &adapter->tx_rings[i];
        que->rxr = &adapter->rx_rings[i];
    }

    return (0);

err_rx_desc:
    for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
        ixv_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
    for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
        ixv_dma_free(adapter, &txr->txdma);
    free(adapter->rx_rings, M_DEVBUF);
rx_fail:
    free(adapter->tx_rings, M_DEVBUF);
tx_fail:
    free(adapter->queues, M_DEVBUF);
fail:
    return (error);
}
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach; setup is done on every reset.
 *
 **********************************************************************/
static int
ixv_allocate_transmit_buffers(struct tx_ring *txr)
{
    struct adapter *adapter = txr->adapter;
    device_t dev = adapter->dev;
    struct ixv_tx_buf *txbuf;
    int error, i;

    /*
     * Setup DMA descriptor areas.
     */
    if ((error = bus_dma_tag_create(NULL,   /* parent */
            1, 0,                   /* alignment, bounds */
            BUS_SPACE_MAXADDR,      /* lowaddr */
            BUS_SPACE_MAXADDR,      /* highaddr */
            NULL, NULL,             /* filter, filterarg */
            IXV_TSO_SIZE,           /* maxsize */
            32,                     /* nsegments */
            PAGE_SIZE,              /* maxsegsize */
            0,                      /* flags */
            NULL,                   /* lockfunc */
            NULL,                   /* lockfuncarg */
            &txr->txtag))) {
        device_printf(dev, "Unable to allocate TX DMA tag\n");
        goto fail;
    }

    if (!(txr->tx_buffers =
        (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
        adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate tx_buffer memory\n");
        error = ENOMEM;
        goto fail;
    }

    /* Create the descriptor buffer dma maps */
    txbuf = txr->tx_buffers;
    for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
        error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
        if (error != 0) {
            device_printf(dev, "Unable to create TX DMA map\n");
            goto fail;
        }
    }

    return 0;
fail:
    /* We free all; this handles the case where we are in the middle */
    ixv_free_transmit_structures(adapter);
    return (error);
}
/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixv_setup_transmit_ring(struct tx_ring *txr)
{
    struct adapter *adapter = txr->adapter;
    struct ixv_tx_buf *txbuf;
    int i;

    /* Clear the old ring contents */
    IXV_TX_LOCK(txr);
    bzero((void *)txr->tx_base,
        (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
    /* Reset indices */
    txr->next_avail_desc = 0;
    txr->next_to_clean = 0;

    /* Free any existing tx buffers. */
    txbuf = txr->tx_buffers;
    for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
        if (txbuf->m_head != NULL) {
            bus_dmamap_sync(txr->txtag, txbuf->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(txr->txtag, txbuf->map);
            m_freem(txbuf->m_head);
            txbuf->m_head = NULL;
        }
        /* Clear the EOP index */
        txbuf->eop_index = -1;
    }

    /* Set number of descriptors available */
    txr->tx_avail = adapter->num_tx_desc;

    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    IXV_TX_UNLOCK(txr);
}
/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
static int
ixv_setup_transmit_structures(struct adapter *adapter)
{
    struct tx_ring *txr = adapter->tx_rings;

    for (int i = 0; i < adapter->num_queues; i++, txr++)
        ixv_setup_transmit_ring(txr);

    return (0);
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Setup Transmit Descriptor Cmd Settings */
		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
		txr->watchdog_check = FALSE;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
	}

	return;
}
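/*
** For example, the ring base programming above splits a 64-bit DMA
** address across two 32-bit registers: with tdba = 0x0000000123456000,
** VFTDBAL is written with 0x23456000 and VFTDBAH with 0x00000001.
*/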
/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
static void
ixv_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		ixv_free_transmit_buffers(txr);
		ixv_dma_free(adapter, &txr->txdma);
		IXV_TX_UNLOCK(txr);
		IXV_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
static void
ixv_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *tx_buffer;
	int i;

	INIT_DEBUGOUT("free_transmit_ring: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			bus_dmamap_destroy(txr->txtag,
			    tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
#if __FreeBSD_version >= 800000
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
#endif
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		bus_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
	return;
}
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN or CSUM
 *
 **********************************************************************/
static bool
ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	int ehdrlen, ip_hlen = 0;
	u16 etype;
	u8 ipproto = 0;
	bool offload = TRUE;
	int ctxd = txr->next_avail_desc;
	u16 vtag = 0;

	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		offload = FALSE;

	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the descriptor itself.
	*/
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (offload == FALSE)
		return FALSE;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;
		if (mp->m_len < ehdrlen + ip_hlen)
			return FALSE;
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr);
		if (mp->m_len < ehdrlen + ip_hlen)
			return FALSE;
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
	default:
		offload = FALSE;
		break;
	}

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags & CSUM_UDP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		break;
#if __FreeBSD_version >= 800000
	case IPPROTO_SCTP:
		if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		break;
#endif
	default:
		offload = FALSE;
		break;
	}

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (offload);
}
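/*
** Worked example (illustrative values) of the vlan_macip_lens packing
** done above, for vtag = 100, ehdrlen = 14 and ip_hlen = 20:
**
**	vlan_macip_lens = (100 << IXGBE_ADVTXD_VLAN_SHIFT)	bits 31:16
**			| (14 << IXGBE_ADVTXD_MACLEN_SHIFT)	MAC hdr len
**			| 20;					IP hdr len
*/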
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static bool
ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0;
	u16 vtag = 0;
	int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct tcphdr *th;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	else
		ehdrlen = ETHER_HDR_LEN;

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;

	ctxd = txr->next_avail_desc;
	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	ip = (struct ip *)(mp->m_data + ehdrlen);
	if (ip->ip_p != IPPROTO_TCP)
		return FALSE;	/* 0 */
	ip->ip_sum = 0;
	ip_hlen = ip->ip_hl << 2;
	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
	th->th_sum = in_pseudo(ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	/* This is used in the transmit desc in encap */
	*paylen = mp->m_pkthdr.len - hdrlen;

	/* VLAN MACLEN IPLEN */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);
	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	return TRUE;
}
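/*
** Note on the checksum seeded above: in_pseudo() is called with only
** the addresses and protocol, deliberately omitting the TCP length;
** for TSO the hardware recomputes the sum per MSS-sized segment and
** inserts each segment's own length.
*/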
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static bool
ixv_txeof(struct tx_ring *txr)
{
	struct adapter	*adapter = txr->adapter;
	struct ifnet	*ifp = adapter->ifp;
	u32 first, last, done;
	struct ixv_tx_buf *tx_buffer;
	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;

	mtx_assert(&txr->tx_mtx, MA_OWNED);

	if (txr->tx_avail == adapter->num_tx_desc)
		return FALSE;

	first = txr->next_to_clean;
	tx_buffer = &txr->tx_buffers[first];
	/* For cleanup we just use legacy struct */
	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
	last = tx_buffer->eop_index;
	if (last == -1)
		return FALSE;
	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];

	/*
	** Get the index of the first descriptor
	** BEYOND the EOP and call that 'done'.
	** I do this so the comparison in the
	** inner while loop below can be simple
	*/
	if (++last == adapter->num_tx_desc) last = 0;
	done = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);
	/*
	** Only the EOP descriptor of a packet now has the DD
	** bit set, this is what we look for...
	*/
	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++txr->tx_avail;

			if (tx_buffer->m_head) {
				bus_dmamap_sync(txr->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txr->txtag,
				    tx_buffer->map);
				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
				tx_buffer->map = NULL;
			}
			tx_buffer->eop_index = -1;
			txr->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &txr->tx_buffers[first];
			tx_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
		}
		++ifp->if_opackets;
		/* See if there is more work now */
		last = tx_buffer->eop_index;
		if (last != -1) {
			eop_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
			/* Get next done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	txr->next_to_clean = first;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (txr->tx_avail == adapter->num_tx_desc) {
			txr->watchdog_check = FALSE;
			return FALSE;
		}
	}

	return TRUE;
}
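/*
** For example, on a 4-descriptor ring a packet occupying slots 1..2
** (EOP at 2) gives done = 3, so the inner loop above reclaims slots
** 1 and 2 and stops, using the usual wrap idiom
** "if (++index == num_tx_desc) index = 0".
*/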
/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary, if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, thus it can simply
 *     be recalled to try again.
 *
 **********************************************************************/
static void
ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
{
	struct adapter		*adapter = rxr->adapter;
	bus_dma_segment_t	hseg[1];
	bus_dma_segment_t	pseg[1];
	struct ixv_rx_buf	*rxbuf;
	struct mbuf		*mh, *mp;
	int			i, j, nsegs, error;
	bool			refreshed = FALSE;

	i = j = rxr->next_to_refresh;
	/* Get the control variable, one beyond refresh point */
	if (++j == adapter->num_rx_desc)
		j = 0;
	while (j != limit) {
		rxbuf = &rxr->rx_buffers[i];
		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
			mh = m_gethdr(M_DONTWAIT, MT_DATA);
			if (mh == NULL)
				goto update;
			mh->m_pkthdr.len = mh->m_len = MHLEN;
			mh->m_flags |= M_PKTHDR;
			m_adj(mh, ETHER_ALIGN);
			/* Get the memory mapping */
			error = bus_dmamap_load_mbuf_sg(rxr->htag,
			    rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("GET BUF: dmamap load"
				    " failure - %d\n", error);
				m_free(mh);
				goto update;
			}
			rxbuf->m_head = mh;
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_PREREAD);
			rxr->rx_base[i].read.hdr_addr =
			    htole64(hseg[0].ds_addr);
		}

		if (rxbuf->m_pack == NULL) {
			mp = m_getjcl(M_DONTWAIT, MT_DATA,
			    M_PKTHDR, adapter->rx_mbuf_sz);
			if (mp == NULL)
				goto update;
		} else
			mp = rxbuf->m_pack;

		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("GET BUF: dmamap load"
			    " failure - %d\n", error);
			m_free(mp);
			rxbuf->m_pack = NULL;
			goto update;
		}
		rxbuf->m_pack = mp;
		bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
		    BUS_DMASYNC_PREREAD);
		rxr->rx_base[i].read.pkt_addr =
		    htole64(pseg[0].ds_addr);

		refreshed = TRUE;
		rxr->next_to_refresh = i = j;
		/* Calculate next index */
		if (++j == adapter->num_rx_desc)
			j = 0;
	}
update:
	if (refreshed) /* update tail index */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
	return;
}
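/*
** Note: the tail register is written once per refresh batch rather
** than per descriptor, so a burst of replenished buffers costs a
** single MMIO write on this hot path.
*/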
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
ixv_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	device_t		dev = adapter->dev;
	struct ixv_rx_buf	*rxbuf;
	int			i, bsize, error;

	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
	if (!(rxr->rx_buffers =
	    (struct ixv_rx_buf *) malloc(bsize,
	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Tag for the small header mbufs */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
			   1, 0,		/* alignment, bounds */
			   BUS_SPACE_MAXADDR,	/* lowaddr */
			   BUS_SPACE_MAXADDR,	/* highaddr */
			   NULL, NULL,		/* filter, filterarg */
			   MSIZE,		/* maxsize */
			   1,			/* nsegments */
			   MSIZE,		/* maxsegsize */
			   0,			/* flags */
			   NULL,		/* lockfunc */
			   NULL,		/* lockfuncarg */
			   &rxr->htag))) {
		device_printf(dev, "Unable to create RX head DMA tag\n");
		goto fail;
	}

	/* Tag for the payload clusters */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
			   1, 0,		/* alignment, bounds */
			   BUS_SPACE_MAXADDR,	/* lowaddr */
			   BUS_SPACE_MAXADDR,	/* highaddr */
			   NULL, NULL,		/* filter, filterarg */
			   MJUMPAGESIZE,	/* maxsize */
			   1,			/* nsegments */
			   MJUMPAGESIZE,	/* maxsegsize */
			   0,			/* flags */
			   NULL,		/* lockfunc */
			   NULL,		/* lockfuncarg */
			   &rxr->ptag))) {
		device_printf(dev, "Unable to create RX pkt DMA tag\n");
		goto fail;
	}

	for (i = 0; i < adapter->num_rx_desc; i++) {
		rxbuf = &rxr->rx_buffers[i];
		error = bus_dmamap_create(rxr->htag,
		    BUS_DMA_NOWAIT, &rxbuf->hmap);
		if (error) {
			device_printf(dev, "Unable to create RX head map\n");
			goto fail;
		}
		error = bus_dmamap_create(rxr->ptag,
		    BUS_DMA_NOWAIT, &rxbuf->pmap);
		if (error) {
			device_printf(dev, "Unable to create RX pkt map\n");
			goto fail;
		}
	}

	return (0);

fail:
	/* Frees all, but can handle partial completion */
	ixv_free_receive_structures(adapter);
	return (error);
}
static void
ixv_free_receive_ring(struct rx_ring *rxr)
{
	struct adapter		*adapter;
	struct ixv_rx_buf	*rxbuf;
	int			i;

	adapter = rxr->adapter;
	for (i = 0; i < adapter->num_rx_desc; i++) {
		rxbuf = &rxr->rx_buffers[i];
		if (rxbuf->m_head != NULL) {
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->htag, rxbuf->hmap);
			rxbuf->m_head->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_head);
		}
		if (rxbuf->m_pack != NULL) {
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
			rxbuf->m_pack->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_pack);
		}
		rxbuf->m_head = NULL;
		rxbuf->m_pack = NULL;
	}
}
/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
static int
ixv_setup_receive_ring(struct rx_ring *rxr)
{
	struct adapter		*adapter;
	struct ifnet		*ifp;
	device_t		dev;
	struct ixv_rx_buf	*rxbuf;
	bus_dma_segment_t	pseg[1], hseg[1];
	struct lro_ctrl		*lro = &rxr->lro;
	int			rsize, nsegs, error = 0;

	adapter = rxr->adapter;
	ifp = adapter->ifp;
	dev = adapter->dev;

	/* Clear the ring contents */
	IXV_RX_LOCK(rxr);
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	bzero((void *)rxr->rx_base, rsize);

	/* Free current RX buffer structs and their mbufs */
	ixv_free_receive_ring(rxr);

	/* Configure header split? */
	if (ixv_header_split)
		rxr->hdr_split = TRUE;

	/* Now replenish the mbufs */
	for (int j = 0; j != adapter->num_rx_desc; ++j) {
		struct mbuf *mh, *mp;

		rxbuf = &rxr->rx_buffers[j];
		/*
		** Don't allocate header mbufs if not
		** doing header split, it's wasteful
		*/
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
		if (rxbuf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m_adj(rxbuf->m_head, ETHER_ALIGN);
		mh = rxbuf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    rxbuf->hmap, rxbuf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag,
		    rxbuf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
		    M_PKTHDR, adapter->rx_mbuf_sz);
		if (rxbuf->m_pack == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		mp = rxbuf->m_pack;
		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
			goto fail;
		bus_dmamap_sync(rxr->ptag,
		    rxbuf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->next_to_refresh = 0;
	rxr->lro_enabled = FALSE;
	rxr->rx_split_packets = 0;
	rxr->rx_bytes = 0;
	rxr->discard = FALSE;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	** Now set up the LRO interface:
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		int err = tcp_lro_init(lro);
		if (err) {
			device_printf(dev, "LRO Initialization failed!\n");
			goto fail;
		}
		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
		rxr->lro_enabled = TRUE;
		lro->ifp = adapter->ifp;
	}

	IXV_RX_UNLOCK(rxr);
	return (0);

fail:
	ixv_free_receive_ring(rxr);
	IXV_RX_UNLOCK(rxr);
	return (error);
}
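/*
** With header split enabled each descriptor above is armed with two
** addresses: hdr_addr (the small MHLEN mbuf) and pkt_addr (the
** cluster). Without split only pkt_addr is loaded and the hardware
** DMAs the entire frame into the cluster.
*/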
/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
ixv_setup_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	int j;

	for (j = 0; j < adapter->num_queues; j++, rxr++)
		if (ixv_setup_receive_ring(rxr))
			goto fail;

	return (0);
fail:
	/*
	 * Free RX buffers allocated so far, we will only handle
	 * the rings that completed, the failing case will have
	 * cleaned up for itself. 'j' failed, so it's the terminus.
	 */
	for (int i = 0; i < j; ++i) {
		rxr = &adapter->rx_rings[i];
		ixv_free_receive_ring(rxr);
	}

	return (ENOBUFS);
}
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, fctrl, rxcsum, hlreg;

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU) {
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	} else {
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Do the queue enabling first */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		if (rxr->hdr_split) {
			/* Use a standard mbuf for the header */
			reg |= ((IXV_RX_HDR <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		} else
			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
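/*
** Worked example of the SRRCTL buffer-size encoding used above:
** IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, i.e. the field is in 1KB units,
** so a 2048-byte cluster is programmed as 2048 >> 10 = 2 and a
** 4096-byte jumbo cluster as 4096 >> 10 = 4.
*/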
/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
static void
ixv_free_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		struct lro_ctrl *lro = &rxr->lro;
		ixv_free_receive_buffers(rxr);
		/* Free LRO memory */
		tcp_lro_free(lro);
		/* Free the ring memory as well */
		ixv_dma_free(adapter, &rxr->rxdma);
	}

	free(adapter->rx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
static void
ixv_free_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	struct ixv_rx_buf	*rxbuf;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Cleanup any existing buffers */
	if (rxr->rx_buffers != NULL) {
		for (int i = 0; i < adapter->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->m_head != NULL) {
				bus_dmamap_sync(rxr->htag, rxbuf->hmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->htag, rxbuf->hmap);
				rxbuf->m_head->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_head);
			}
			if (rxbuf->m_pack != NULL) {
				bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
				rxbuf->m_pack->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_pack);
			}
			rxbuf->m_head = NULL;
			rxbuf->m_pack = NULL;
			if (rxbuf->hmap != NULL) {
				bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
				rxbuf->hmap = NULL;
			}
			if (rxbuf->pmap != NULL) {
				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
				rxbuf->pmap = NULL;
			}
		}
		if (rxr->rx_buffers != NULL) {
			free(rxr->rx_buffers, M_DEVBUF);
			rxr->rx_buffers = NULL;
		}
	}

	if (rxr->htag != NULL) {
		bus_dma_tag_destroy(rxr->htag);
		rxr->htag = NULL;
	}
	if (rxr->ptag != NULL) {
		bus_dma_tag_destroy(rxr->ptag);
		rxr->ptag = NULL;
	}

	return;
}
static __inline void
ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{
	/*
	 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
	 * should be computed by hardware. Also it should not have VLAN tag in
	 * ethernet header.
	 */
	if (rxr->lro_enabled &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		** If the LRO queue has room, try to enqueue; on success
		** the mbuf is consumed here. Otherwise fall through and
		** hand the mbuf to the stack directly.
		*/
		if (rxr->lro.lro_cnt != 0)
			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
				return;
	}
	IXV_RX_UNLOCK(rxr);
	(*ifp->if_input)(ifp, m);
	IXV_RX_LOCK(rxr);
}
static __inline void
ixv_rx_discard(struct rx_ring *rxr, int i)
{
	struct ixv_rx_buf *rbuf;

	rbuf = &rxr->rx_buffers[i];
	if (rbuf->fmp != NULL) { /* Partial chain ? */
		rbuf->fmp->m_flags |= M_PKTHDR;
		m_freem(rbuf->fmp);
		rbuf->fmp = NULL;
	}
	/*
	** With advanced descriptors the writeback
	** clobbers the buffer addrs, so it's easier
	** to just free the existing mbufs and take
	** the normal refresh path to get new buffers
	** and mapping.
	*/
	if (rbuf->m_head) {
		m_free(rbuf->m_head);
		rbuf->m_head = NULL;
	}
	if (rbuf->m_pack) {
		m_free(rbuf->m_pack);
		rbuf->m_pack = NULL;
	}

	return;
}
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *********************************************************************/
static bool
ixv_rxeof(struct ix_queue *que, int count)
{
	struct adapter		*adapter = que->adapter;
	struct rx_ring		*rxr = que->rxr;
	struct ifnet		*ifp = adapter->ifp;
	struct lro_ctrl		*lro = &rxr->lro;
	struct lro_entry	*queued;
	int			i, nextp, processed = 0;
	u32			staterr = 0;
	union ixgbe_adv_rx_desc	*cur;
	struct ixv_rx_buf	*rbuf, *nbuf;

	IXV_RX_LOCK(rxr);

	for (i = rxr->next_to_check; count != 0;) {
		struct mbuf	*sendmp, *mh, *mp;
		u32		ptype;
		u16		hlen, plen, hdr, vtag;
		bool		eop;

		/* Sync the ring. */
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		count--;
		sendmp = NULL;
		nbuf = NULL;
		cur->wb.upper.status_error = 0;
		rbuf = &rxr->rx_buffers[i];
		mh = rbuf->m_head;
		mp = rbuf->m_pack;

		plen = le16toh(cur->wb.upper.length);
		ptype = le32toh(cur->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_PKTTYPE_MASK;
		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
		vtag = le16toh(cur->wb.upper.vlan);
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		/* Make sure all parts of a bad packet are discarded */
		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
		    (rxr->discard)) {
			ifp->if_ierrors++;
			rxr->rx_discarded++;
			if (!eop)
				rxr->discard = TRUE;
			else
				rxr->discard = FALSE;
			ixv_rx_discard(rxr, i);
			goto next_desc;
		}

		if (!eop) {
			nextp = i + 1;
			if (nextp == adapter->num_rx_desc)
				nextp = 0;
			nbuf = &rxr->rx_buffers[nextp];
			prefetch(nbuf);
		}
		/*
		** The header mbuf is ONLY used when header
		** split is enabled, otherwise we get normal
		** behavior, ie, both header and payload
		** are DMA'd into the payload buffer.
		**
		** Rather than using the fmp/lmp global pointers
		** we now keep the head of a packet chain in the
		** buffer struct and pass this along from one
		** descriptor to the next, until we get EOP.
		*/
		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
			/* This must be an initial descriptor */
			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hlen > IXV_RX_HDR)
				hlen = MHLEN;
			mh->m_len = hlen;
			mh->m_flags |= M_PKTHDR;
			mh->m_next = NULL;
			mh->m_pkthdr.len = mh->m_len;
			/* Null buf pointer so it is refreshed */
			rbuf->m_head = NULL;
			/*
			** Check the payload length, this
			** could be zero if it's a small
			** packet.
			*/
			if (plen > 0) {
				mp->m_len = plen;
				mp->m_next = NULL;
				mp->m_flags &= ~M_PKTHDR;
				mh->m_next = mp;
				mh->m_pkthdr.len += mp->m_len;
				/* Null buf pointer so it is refreshed */
				rbuf->m_pack = NULL;
				rxr->rx_split_packets++;
			}
			/*
			** Now create the forward
			** chain so when complete
			** we won't have to.
			*/
			if (eop == 0) {
				/* stash the chain head */
				nbuf->fmp = mh;
				/* Make forward chain */
				if (plen)
					mp->m_next = nbuf->m_pack;
				else
					mh->m_next = nbuf->m_pack;
			} else {
				/* Singlet, prepare to send */
				sendmp = mh;
				if ((adapter->num_vlans) &&
				    (staterr & IXGBE_RXD_STAT_VP)) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
		} else {
			/*
			** Either no header split, or a
			** secondary piece of a fragmented
			** packet.
			*/
			mp->m_len = plen;
			/*
			** See if there is a stored head
			** that determines what we are
			*/
			sendmp = rbuf->fmp;
			rbuf->m_pack = rbuf->fmp = NULL;

			if (sendmp != NULL) /* secondary frag */
				sendmp->m_pkthdr.len += mp->m_len;
			else {
				/* first desc of a non-ps chain */
				sendmp = mp;
				sendmp->m_flags |= M_PKTHDR;
				sendmp->m_pkthdr.len = mp->m_len;
				if (staterr & IXGBE_RXD_STAT_VP) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
			/* Pass the head pointer on */
			if (eop == 0) {
				nbuf->fmp = sendmp;
				sendmp = NULL;
				mp->m_next = nbuf->m_pack;
			}
		}
		++processed;
		/* Sending this frame? */
		if (sendmp != NULL) {
			sendmp->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;
			rxr->rx_packets++;
			/* capture data for AIM */
			rxr->bytes += sendmp->m_pkthdr.len;
			rxr->rx_bytes += sendmp->m_pkthdr.len;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				ixv_rx_checksum(staterr, sendmp, ptype);
#if __FreeBSD_version >= 800000
			sendmp->m_pkthdr.flowid = que->msix;
			sendmp->m_flags |= M_FLOWID;
#endif
		}
next_desc:
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;

		/* Now send to the stack or do LRO */
		if (sendmp != NULL)
			ixv_rx_input(rxr, ifp, sendmp, ptype);

		/* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			ixv_refresh_mbufs(rxr, i);
			processed = 0;
		}
	}

	/* Refresh any remaining buf structs */
	if (ixv_rx_unrefreshed(rxr))
		ixv_refresh_mbufs(rxr, i);

	rxr->next_to_check = i;

	/*
	 * Flush any outstanding LRO work
	 */
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	IXV_RX_UNLOCK(rxr);

	/*
	** We still have cleaning to do?
	** Schedule another interrupt if so.
	*/
	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
		ixv_rearm_queues(adapter, (u64)(1 << que->msix));
		return (TRUE);
	}

	return (FALSE);
}
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixv_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
{
	u16	status = (u16) staterr;
	u8	errors = (u8) (staterr >> 24);
	bool	sctp = FALSE;

	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = TRUE;

	if (status & IXGBE_RXD_STAT_IPCS) {
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		} else
			mp->m_pkthdr.csum_flags = 0;
	}
	if (status & IXGBE_RXD_STAT_L4CS) {
		u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
		if (sctp)
			type = CSUM_SCTP_VALID;
#endif
		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |= type;
			if (!sctp)
				mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
	return;
}
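/*
** Note: csum_data = 0xffff together with CSUM_DATA_VALID |
** CSUM_PSEUDO_HDR is the FreeBSD idiom for "L4 checksum verified in
** hardware"; the stack will then skip its software verification.
*/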
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no VLANs registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
	}

	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan id's
		** based on the bits set in each
		** of the array ints.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
/*
** This routine is run via a VLAN config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXV_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
}
/*
** This routine is run via a VLAN
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXV_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
}
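/*
** Worked example of the shadow VFTA indexing used by the two event
** handlers above, for vtag = 100:
**
**	index = (100 >> 5) & 0x7F = 3
**	bit   =  100 & 0x1F       = 4
**
** so VLAN 100 is bit 4 of ixv_shadow_vfta[3], matching the
** vid = (i * 32) + j reconstruction in ixv_setup_vlan_support().
*/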
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
static void
ixv_disable_intr(struct adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	return;
}
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**   - entry is the register array entry
**   - vector is the MSIX vector for this queue
**   - type is RX/TX/MISC
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
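/*
** Worked example of the IVAR indexing above: each 32-bit VTIVAR
** register carries four 8-bit entries (RX and TX for two queues).
** For entry = 3, type = 1 (TX):
**
**	register = VTIVAR(3 >> 1) = VTIVAR(1)
**	index    = 16 * (3 & 1) + 8 * 1 = 24
**
** so the vector is placed in bits 31:24 of VTIVAR(1).
*/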
static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the Link interrupt */
	ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
}
/*
** Tasklet handler for MSIX MBX interrupts
**   - do outside interrupt since it might sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}
/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc +=
		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc +=
		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc +=
		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc +=
		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc +=
		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
	}
}
static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}

#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}
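/*
** Worked example of the wrap handling in UPDATE_STAT_32: if 'last'
** was 0xFFFFFFF0 and the register now reads 0x00000010, current <
** last, so 2^32 is added to the 64-bit count before its low 32 bits
** are replaced; UPDATE_STAT_36 does the same with 2^36 for the
** LSB/MSB register pairs.
*/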
/*
** ixv_update_stats - Update the board statistics counters.
*/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
	    adapter->stats.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
	    adapter->stats.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
	    adapter->stats.vfmprc);
}
/**********************************************************************
 *
 *  This routine is called only when ixgbe_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	device_printf(dev, "Std Mbuf Failed = %lu\n",
	    adapter->mbuf_defrag_failed);
	device_printf(dev, "Driver dropped packets = %lu\n",
	    adapter->dropped_pkts);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    adapter->watchdog_events);

	device_printf(dev, "Good Packets Rcvd = %llu\n",
	    (long long)adapter->stats.vfgprc);
	device_printf(dev, "Good Packets Xmtd = %llu\n",
	    (long long)adapter->stats.vfgptc);
	device_printf(dev, "TSO Transmissions = %lu\n",
	    adapter->tso_tx);
}
/**********************************************************************
 *
 *  This routine is called only when ixgbe_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct rx_ring	*rxr;
	struct tx_ring	*txr;
	struct lro_ctrl	*lro;

	device_printf(dev, "Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
		lro = &rxr->lro;
		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs);
		device_printf(dev, "RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets);
		device_printf(dev, "RX(%d) Split RX Packets: %lld\n",
		    rxr->me, (long long)rxr->rx_split_packets);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes);
		device_printf(dev, "RX(%d) LRO Queued = %d\n",
		    rxr->me, lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed = %d\n",
		    rxr->me, lro->lro_flushed);
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail);
	}

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (long)adapter->mbx_irq);
	return;
}
static int
ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int error, result = -1;
	struct adapter *adapter;

	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixv_print_hw_stats(adapter);
	}
	return (error);
}

static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	int error, result = -1;
	struct adapter *adapter;

	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixv_print_debug_info(adapter);
	}
	return (error);
}
/*
** Set flow control using sysctl:
** Flow control values:
**	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
*/
static int
ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct adapter *adapter;

	error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
	if (error)
		return (error);

	adapter = (struct adapter *) arg1;
	switch (ixv_flow_control) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		adapter->hw.fc.requested_mode = ixv_flow_control;
		break;
	case ixgbe_fc_none:
	default:
		adapter->hw.fc.requested_mode = ixgbe_fc_none;
	}

	ixgbe_fc_enable(&adapter->hw, 0);
	return (error);
}
static void
ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	*limit = value;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
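/*
** Illustrative call (not compiled; mirrors how this helper is used
** from attach in this driver family):
*/
#if 0
	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);
#endif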