1 /*******************************************************************************
3 Copyright (c) 2001-2007, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
41 /*********************************************************************
42 * Set this to one to display debug statistics
43 *********************************************************************/
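/*
 * (Note: when nonzero, ixgbe_local_timer() below prints the hardware
 * statistics via ixgbe_print_hw_stats() once a second while the
 * interface is running.)
 */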
44 int ixgbe_display_debug_stats = 0;
46 /*********************************************************************
48 *********************************************************************/
49 char ixgbe_driver_version[] = "1.2.6";
51 /*********************************************************************
54 * Used by probe to select devices to load on
55 * Last field stores an index into ixgbe_strings
56 * Last entry must be all 0s
58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
59 *********************************************************************/
61 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
66 /* required last entry */
70 /*********************************************************************
71 * Table of branding strings
72 *********************************************************************/
74 static char *ixgbe_strings[] = {
75 "Intel(R) PRO/10GbE PCI-Express Network Driver"
78 /*********************************************************************
80 *********************************************************************/
81 static int ixgbe_probe(device_t);
82 static int ixgbe_attach(device_t);
83 static int ixgbe_detach(device_t);
84 static int ixgbe_shutdown(device_t);
85 static void ixgbe_start(struct ifnet *);
86 static void ixgbe_start_locked(struct ifnet *);
87 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
88 static void ixgbe_watchdog(struct adapter *);
89 static void ixgbe_init(void *);
90 static void ixgbe_init_locked(struct adapter *);
91 static void ixgbe_stop(void *);
92 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
93 static int ixgbe_media_change(struct ifnet *);
94 static void ixgbe_identify_hardware(struct adapter *);
95 static int ixgbe_allocate_pci_resources(struct adapter *);
96 static void ixgbe_free_pci_resources(struct adapter *);
97 static void ixgbe_local_timer(void *);
98 static int ixgbe_hardware_init(struct adapter *);
99 static void ixgbe_setup_interface(device_t, struct adapter *);
100 static int ixgbe_allocate_queues(struct adapter *);
101 static int ixgbe_allocate_msix_resources(struct adapter *);
102 #if __FreeBSD_version >= 700000
103 static int ixgbe_setup_msix(struct adapter *);
105 static int ixgbe_allocate_transmit_buffers(struct tx_ring *);
106 static int ixgbe_setup_transmit_structures(struct adapter *);
107 static void ixgbe_setup_transmit_ring(struct tx_ring *);
108 static void ixgbe_initialize_transmit_units(struct adapter *);
109 static void ixgbe_free_transmit_structures(struct adapter *);
110 static void ixgbe_free_transmit_buffers(struct tx_ring *);
112 static int ixgbe_allocate_receive_buffers(struct rx_ring *);
113 static int ixgbe_setup_receive_structures(struct adapter *);
114 static int ixgbe_setup_receive_ring(struct rx_ring *);
115 static void ixgbe_initialize_receive_units(struct adapter *);
116 static void ixgbe_free_receive_structures(struct adapter *);
117 static void ixgbe_free_receive_buffers(struct rx_ring *);
119 static void ixgbe_enable_intr(struct adapter *);
120 static void ixgbe_disable_intr(struct adapter *);
121 static void ixgbe_update_stats_counters(struct adapter *);
122 static bool ixgbe_txeof(struct tx_ring *);
123 static int ixgbe_rxeof(struct rx_ring *, int);
124 static void ixgbe_rx_checksum(struct adapter *, uint32_t, struct mbuf *);
125 static void ixgbe_set_promisc(struct adapter *);
126 static void ixgbe_disable_promisc(struct adapter *);
127 static void ixgbe_set_multi(struct adapter *);
128 static void ixgbe_print_hw_stats(struct adapter *);
129 static void ixgbe_print_debug_info(struct adapter *);
130 static void ixgbe_update_link_status(struct adapter *);
131 static int ixgbe_get_buf(struct rx_ring *, int);
132 static void ixgbe_enable_vlans(struct adapter * adapter);
133 static int ixgbe_encap(struct adapter *, struct mbuf **);
134 static int ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS);
135 static int ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS);
136 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
137 static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
138 struct ixgbe_dma_alloc *, int);
139 static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
140 static void ixgbe_add_rx_process_limit(struct adapter *, const char *,
141 const char *, int *, int);
142 static boolean_t ixgbe_tx_csum_setup(struct tx_ring *, struct mbuf *);
143 static boolean_t ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
144 static void ixgbe_set_ivar(struct adapter *, u16, u8);
145 static void ixgbe_configure_ivars(struct adapter *);
147 /* Legacy Fast Interrupt routine and handlers */
148 #if __FreeBSD_version >= 700000
149 static int ixgbe_fast_irq(void *);
150 /* The MSI/X Interrupt handlers */
151 static void ixgbe_msix_tx(void *);
152 static void ixgbe_msix_rx(void *);
153 static void ixgbe_msix_link(void *);
155 static void ixgbe_fast_irq(void *);
158 static void ixgbe_rxtx(void *context, int pending);
159 static void ixgbe_link(void *context, int pending);
161 #ifndef NO_82598_A0_SUPPORT
162 static void desc_flip(void *);
165 /*********************************************************************
166 * FreeBSD Device Interface Entry Points
167 *********************************************************************/
169 static device_method_t ixgbe_methods[] = {
170 /* Device interface */
171 DEVMETHOD(device_probe, ixgbe_probe),
172 DEVMETHOD(device_attach, ixgbe_attach),
173 DEVMETHOD(device_detach, ixgbe_detach),
174 DEVMETHOD(device_shutdown, ixgbe_shutdown),
178 static driver_t ixgbe_driver = {
179 "ix", ixgbe_methods, sizeof(struct adapter),
182 static devclass_t ixgbe_devclass;
183 DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
185 MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
186 MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
189 ** TUNABLE PARAMETERS:
192 /* How many packets rxeof tries to clean at a time */
193 static int ixgbe_rx_process_limit = 100;
194 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
196 /* Flow control setting, default to full */
197 static int ixgbe_flow_control = 3;
198 TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
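/*
 * Sketch of the accepted values, assuming they map directly onto the
 * shared code's ixgbe_fc enum: 0 = none, 1 = RX pause only,
 * 2 = TX pause only, 3 = full (the default above).
 */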
200 /* Number of TX queues; note: multi-queue TX is not yet working */
201 static int ixgbe_tx_queues = 1;
202 TUNABLE_INT("hw.ixgbe.tx_queues", &ixgbe_tx_queues);
204 /* Number of RX Queues */
205 static int ixgbe_rx_queues = 8;
206 TUNABLE_INT("hw.ixgbe.rx_queues", &ixgbe_rx_queues);
208 /* Number of Other Queues, this is used for link interrupts */
209 static int ixgbe_other_queues = 1;
210 TUNABLE_INT("hw.ixgbe.other_queues", &ixgbe_other_queues);
212 /* Number of TX descriptors per ring */
213 static int ixgbe_txd = DEFAULT_TXD;
214 TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
216 /* Number of RX descriptors per ring */
217 static int ixgbe_rxd = DEFAULT_RXD;
218 TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
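/*
 * Example (illustrative values only): the knobs above are boot-time
 * loader tunables, so they would typically be set in /boot/loader.conf:
 *
 *	hw.ixgbe.rxd=2048
 *	hw.ixgbe.txd=1024
 *	hw.ixgbe.rx_process_limit=256
 */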
220 /* Total number of interfaces - needed for config sanity check */
221 static int ixgbe_total_ports;
223 /*********************************************************************
224 * Device identification routine
226 * ixgbe_probe determines whether the driver should be loaded on
227 * an adapter, based on the PCI vendor/device ID of that adapter.
229 * return 0 on success, positive on failure
230 *********************************************************************/
233 ixgbe_probe(device_t dev)
235 ixgbe_vendor_info_t *ent;
237 u_int16_t pci_vendor_id = 0;
238 u_int16_t pci_device_id = 0;
239 u_int16_t pci_subvendor_id = 0;
240 u_int16_t pci_subdevice_id = 0;
241 char adapter_name[60];
243 INIT_DEBUGOUT("ixgbe_probe: begin");
245 pci_vendor_id = pci_get_vendor(dev);
246 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
249 pci_device_id = pci_get_device(dev);
250 pci_subvendor_id = pci_get_subvendor(dev);
251 pci_subdevice_id = pci_get_subdevice(dev);
253 ent = ixgbe_vendor_info_array;
254 while (ent->vendor_id != 0) {
255 if ((pci_vendor_id == ent->vendor_id) &&
256 (pci_device_id == ent->device_id) &&
258 ((pci_subvendor_id == ent->subvendor_id) ||
259 (ent->subvendor_id == 0)) &&
261 ((pci_subdevice_id == ent->subdevice_id) ||
262 (ent->subdevice_id == 0))) {
263 sprintf(adapter_name, "%s, Version - %s",
264 ixgbe_strings[ent->index],
265 ixgbe_driver_version);
266 switch (pci_device_id) {
267 case IXGBE_DEV_ID_82598AF_DUAL_PORT :
268 ixgbe_total_ports += 2;
270 case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
271 ixgbe_total_ports += 1;
275 device_set_desc_copy(dev, adapter_name);
284 /*********************************************************************
285 * Device initialization routine
287 * The attach entry point is called when the driver is being loaded.
288 * This routine identifies the type of hardware, allocates all resources
289 * and initializes the hardware.
291 * return 0 on success, positive on failure
292 *********************************************************************/
295 ixgbe_attach(device_t dev)
297 struct adapter *adapter;
300 char name_string[16];
302 INIT_DEBUGOUT("ixgbe_attach: begin");
304 /* Allocate, clear, and link in our adapter structure */
305 adapter = device_get_softc(dev);
306 adapter->dev = adapter->osdep.dev = dev;
307 /* General Lock Init*/
308 snprintf(name_string, sizeof(name_string), "%s:core",
309 device_get_nameunit(dev));
310 mtx_init(&adapter->core_mtx, name_string, NULL, MTX_DEF);
313 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
314 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
315 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
316 adapter, 0, ixgbe_sysctl_stats, "I", "Statistics");
318 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
319 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
320 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
321 adapter, 0, ixgbe_sysctl_debug, "I", "Debug Info");
323 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
324 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
325 OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
326 adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
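/*
 * These become per-device sysctl nodes; assuming the first port attaches
 * as ix0, they could be exercised from userland roughly like:
 *
 *	sysctl dev.ix.0.stats=1		(dump hardware statistics)
 *	sysctl dev.ix.0.debug=1		(dump debug info)
 *	sysctl dev.ix.0.flow_control=3	(select flow control mode)
 */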
328 /* Set up the timer callout */
329 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
331 /* Determine hardware revision */
332 ixgbe_identify_hardware(adapter);
334 /* Tell the RX setup code to use jumbo clusters */
335 adapter->bigbufs = TRUE;
337 /* Do base PCI setup - map BAR0 */
338 if (ixgbe_allocate_pci_resources(adapter)) {
339 device_printf(dev, "Allocation of PCI resources failed\n");
344 /* Do descriptor calc and sanity checks */
345 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
346 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
347 device_printf(dev, "TXD config issue, using default!\n");
348 adapter->num_tx_desc = DEFAULT_TXD;
350 adapter->num_tx_desc = ixgbe_txd;
353 ** With many RX rings it is easy to exceed the
354 ** system mbuf allocation. Tuning nmbclusters
355 ** can alleviate this.
357 if ((adapter->num_rx_queues > 1) && (nmbclusters > 0 )){
359 /* Calculate the total RX mbuf needs */
360 s = (ixgbe_rxd * adapter->num_rx_queues) * ixgbe_total_ports;
361 if (s > nmbclusters) {
362 device_printf(dev, "RX Descriptors exceed "
363 "system mbuf max, using default instead!\n");
364 ixgbe_rxd = DEFAULT_RXD;
368 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
369 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
370 device_printf(dev, "RXD config issue, using default!\n");
371 adapter->num_rx_desc = DEFAULT_RXD;
373 adapter->num_rx_desc = ixgbe_rxd;
375 /* Allocate our TX/RX Queues */
376 if (ixgbe_allocate_queues(adapter)) {
381 #if __FreeBSD_version >= 700000
383 error = ixgbe_setup_msix(adapter);
389 /* Initialize the shared code */
390 if (ixgbe_init_shared_code(&adapter->hw)) {
391 device_printf(dev,"Unable to initialize the shared code\n");
396 /* Initialize the hardware */
397 if (ixgbe_hardware_init(adapter)) {
398 device_printf(dev,"Unable to initialize the hardware\n");
403 /* Setup OS specific network interface */
404 ixgbe_setup_interface(dev, adapter);
406 /* Sysctl for limiting the amount of work done in the taskqueue */
407 ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
408 "max number of rx packets to process", &adapter->rx_process_limit,
409 ixgbe_rx_process_limit);
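/*
 * (Illustrative: this becomes a dev.ix.<unit>.rx_processing_limit sysctl
 * that bounds how many packets ixgbe_rxeof() handles per taskqueue pass.)
 */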
411 /* Initialize statistics */
412 ixgbe_update_stats_counters(adapter);
414 /* let hardware know driver is loaded */
415 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
416 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
417 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
419 INIT_DEBUGOUT("ixgbe_attach: end");
423 ixgbe_free_pci_resources(adapter);
428 /*********************************************************************
429 * Device removal routine
431 * The detach entry point is called when the driver is being removed.
432 * This routine stops the adapter and deallocates all the resources
433 * that were allocated for driver operation.
435 * return 0 on success, positive on failure
436 *********************************************************************/
439 ixgbe_detach(device_t dev)
441 struct adapter *adapter = device_get_softc(dev);
444 INIT_DEBUGOUT("ixgbe_detach: begin");
446 /* Make sure VLANS are not using driver */
447 #if __FreeBSD_version >= 700000
448 if (adapter->ifp->if_vlantrunk != NULL) {
450 if (adapter->ifp->if_nvlans != 0) {
452 device_printf(dev,"Vlan in use, detach first\n");
456 mtx_lock(&adapter->core_mtx);
458 mtx_unlock(&adapter->core_mtx);
460 if (adapter->tq != NULL) {
461 taskqueue_drain(adapter->tq, &adapter->rxtx_task);
462 taskqueue_drain(taskqueue_fast, &adapter->link_task);
463 taskqueue_free(adapter->tq);
467 /* let hardware know driver is unloading */
468 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
469 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
470 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
472 ether_ifdetach(adapter->ifp);
473 callout_drain(&adapter->timer);
474 ixgbe_free_pci_resources(adapter);
475 bus_generic_detach(dev);
476 if_free(adapter->ifp);
478 ixgbe_free_transmit_structures(adapter);
479 ixgbe_free_receive_structures(adapter);
481 mtx_destroy(&adapter->core_mtx);
485 /*********************************************************************
487 * Shutdown entry point
489 **********************************************************************/
492 ixgbe_shutdown(device_t dev)
494 struct adapter *adapter = device_get_softc(dev);
495 mtx_lock(&adapter->core_mtx);
497 mtx_unlock(&adapter->core_mtx);
502 /*********************************************************************
503 * Transmit entry point
505 * ixgbe_start is called by the stack to initiate a transmit.
506 * The driver will remain in this routine as long as there are
507 * packets to transmit and transmit resources are available.
508 * In case resources are not available, the stack is notified and
509 * the packet is requeued.
510 **********************************************************************/
513 ixgbe_start_locked(struct ifnet * ifp)
516 struct adapter *adapter = ifp->if_softc;
518 mtx_assert(&adapter->tx_mtx, MA_OWNED);
520 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
523 if (!adapter->link_active)
526 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
528 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
532 if (ixgbe_encap(adapter, &m_head)) {
535 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
536 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
539 /* Send a copy of the frame to the BPF listener */
540 ETHER_BPF_MTAP(ifp, m_head);
542 /* Set timeout in case hardware has problems transmitting */
543 adapter->watchdog_timer = IXGBE_TX_TIMEOUT;
550 ixgbe_start(struct ifnet *ifp)
552 struct adapter *adapter = ifp->if_softc;
554 mtx_lock(&adapter->tx_mtx);
555 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
556 ixgbe_start_locked(ifp);
557 mtx_unlock(&adapter->tx_mtx);
561 /*********************************************************************
564 * ixgbe_ioctl is called when the user wants to configure the interface.
567 * return 0 on success, positive on failure
568 **********************************************************************/
571 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
574 struct ifreq *ifr = (struct ifreq *) data;
575 struct ifaddr *ifa = (struct ifaddr *) data;
576 struct adapter *adapter = ifp->if_softc;
580 IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
581 if (ifa->ifa_addr->sa_family == AF_INET) {
582 ifp->if_flags |= IFF_UP;
583 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
584 mtx_lock(&adapter->core_mtx);
585 ixgbe_init_locked(adapter);
586 mtx_unlock(&adapter->core_mtx);
588 arp_ifinit(ifp, ifa);
590 ether_ioctl(ifp, command, data);
593 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
594 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
597 mtx_lock(&adapter->core_mtx);
598 ifp->if_mtu = ifr->ifr_mtu;
599 adapter->max_frame_size =
600 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
601 ixgbe_init_locked(adapter);
602 mtx_unlock(&adapter->core_mtx);
606 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
607 mtx_lock(&adapter->core_mtx);
608 if (ifp->if_flags & IFF_UP) {
609 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
610 if ((ifp->if_flags ^ adapter->if_flags) &
612 ixgbe_disable_promisc(adapter);
613 ixgbe_set_promisc(adapter);
616 ixgbe_init_locked(adapter);
618 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
620 adapter->if_flags = ifp->if_flags;
621 mtx_unlock(&adapter->core_mtx);
625 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
626 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
627 mtx_lock(&adapter->core_mtx);
628 ixgbe_disable_intr(adapter);
629 ixgbe_set_multi(adapter);
630 ixgbe_enable_intr(adapter);
631 mtx_unlock(&adapter->core_mtx);
636 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
637 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
641 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
642 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
643 if (mask & IFCAP_HWCSUM)
644 ifp->if_capenable ^= IFCAP_HWCSUM;
645 if (mask & IFCAP_TSO4)
646 ifp->if_capenable ^= IFCAP_TSO4;
647 if (mask & IFCAP_VLAN_HWTAGGING)
648 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
649 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
651 #if __FreeBSD_version >= 700000
652 VLAN_CAPABILITIES(ifp);
657 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
658 error = ether_ioctl(ifp, command, data);
665 /*********************************************************************
666 * Watchdog entry point
668 * This routine is called whenever hardware quits transmitting.
670 **********************************************************************/
673 ixgbe_watchdog(struct adapter *adapter)
676 mtx_assert(&adapter->core_mtx, MA_OWNED);
679 * The timer is set to 5 every time ixgbe_start() queues a packet.
680 * Then ixgbe_txeof() keeps resetting to 5 as long as it cleans at
681 * least one descriptor.
682 * Finally, anytime all descriptors are clean the timer is set to 0.
685 if (adapter->watchdog_timer == 0 || --adapter->watchdog_timer)
689 * If we are in this routine because of pause frames, then don't
690 * reset the hardware.
692 if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
693 adapter->watchdog_timer = IXGBE_TX_TIMEOUT;
698 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
699 ixgbe_print_debug_info(adapter);
701 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
702 adapter->watchdog_events++;
704 ixgbe_init_locked(adapter);
708 /*********************************************************************
711 * This routine is used in two ways. It is used by the stack as
712 * the init entry point in the network interface structure. It is also used
713 * by the driver as a hw/sw initialization routine to get to a consistent state.
716 * return 0 on success, positive on failure
717 **********************************************************************/
718 #define IXGBE_MHADD_MFS_SHIFT 16
721 ixgbe_init_locked(struct adapter *adapter)
723 struct ifnet *ifp = adapter->ifp;
724 device_t dev = adapter->dev;
725 u32 txdctl, rxdctl, mhadd;
727 INIT_DEBUGOUT("ixgbe_init: begin");
729 mtx_assert(&adapter->core_mtx, MA_OWNED);
733 /* Get the latest MAC address; the user may have set an LAA */
734 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
735 IXGBE_ETH_LENGTH_OF_ADDRESS);
736 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, 1);
737 adapter->hw.addr_ctrl.rar_used_count = 1;
739 /* Initialize the hardware */
740 if (ixgbe_hardware_init(adapter)) {
741 device_printf(dev, "Unable to initialize the hardware\n");
745 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
746 ixgbe_enable_vlans(adapter);
748 /* Prepare transmit descriptors and buffers */
749 if (ixgbe_setup_transmit_structures(adapter)) {
750 device_printf(dev,"Could not setup transmit structures\n");
755 ixgbe_initialize_transmit_units(adapter);
757 /* Setup Multicast table */
758 ixgbe_set_multi(adapter);
761 ** If we are resetting to an MTU smaller than 2K,
762 ** drop back to small RX buffers
764 if (adapter->max_frame_size <= MCLBYTES)
765 adapter->bigbufs = FALSE;
767 /* Prepare receive descriptors and buffers */
768 if (ixgbe_setup_receive_structures(adapter)) {
769 device_printf(dev,"Could not setup receive structures\n");
774 /* Configure RX settings */
775 ixgbe_initialize_receive_units(adapter);
777 /* Enable Enhanced MSIX mode */
780 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
781 gpie |= IXGBE_GPIE_MSIX_MODE;
782 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
784 IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
785 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
788 /* Set the various hardware offload abilities */
789 ifp->if_hwassist = 0;
790 if (ifp->if_capenable & IFCAP_TSO4)
791 ifp->if_hwassist |= CSUM_TSO;
792 else if (ifp->if_capenable & IFCAP_TXCSUM)
793 ifp->if_hwassist = (CSUM_TCP | CSUM_UDP);
796 if (ifp->if_mtu > ETHERMTU) {
797 mhadd = IXGBE_READ_REG(&adapter->hw, IXGBE_MHADD);
798 mhadd &= ~IXGBE_MHADD_MFS_MASK;
799 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
800 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MHADD, mhadd);
803 /* Now enable all the queues */
805 for (int i = 0; i < adapter->num_tx_queues; i++) {
806 txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
807 txdctl |= IXGBE_TXDCTL_ENABLE;
808 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
811 for (int i = 0; i < adapter->num_rx_queues; i++) {
812 rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
813 rxdctl |= IXGBE_RXDCTL_ENABLE;
814 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
817 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
819 /* Set up MSI/X routing */
820 ixgbe_configure_ivars(adapter);
822 ixgbe_enable_intr(adapter);
824 /* Now inform the stack we're ready */
825 ifp->if_drv_flags |= IFF_DRV_RUNNING;
826 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
832 ixgbe_init(void *arg)
834 struct adapter *adapter = arg;
836 mtx_lock(&adapter->core_mtx);
837 ixgbe_init_locked(adapter);
838 mtx_unlock(&adapter->core_mtx);
844 ixgbe_link(void *context, int pending)
846 struct adapter *adapter = context;
847 struct ifnet *ifp = adapter->ifp;
849 mtx_lock(&adapter->core_mtx);
850 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
851 mtx_unlock(&adapter->core_mtx);
855 callout_stop(&adapter->timer);
856 ixgbe_update_link_status(adapter);
857 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
858 mtx_unlock(&adapter->core_mtx);
862 ** MSI and Legacy Deferred Handler
863 ** - note this runs without the general lock
866 ixgbe_rxtx(void *context, int pending)
868 struct adapter *adapter = context;
869 struct ifnet *ifp = adapter->ifp;
870 /* For legacy there is only one of each */
871 struct rx_ring *rxr = adapter->rx_rings;
872 struct tx_ring *txr = adapter->tx_rings;
874 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
875 if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0)
876 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
877 mtx_lock(&adapter->tx_mtx);
880 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
881 ixgbe_start_locked(ifp);
882 mtx_unlock(&adapter->tx_mtx);
885 ixgbe_enable_intr(adapter);
889 /*********************************************************************
891 * Legacy Interrupt Service routine
893 **********************************************************************/
895 #if __FreeBSD_version >= 700000
900 ixgbe_fast_irq(void *arg)
903 struct adapter *adapter = arg;
905 reg_eicr = IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
909 ixgbe_disable_intr(adapter);
910 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
912 /* Link status change */
913 if (reg_eicr & IXGBE_EICR_LSC)
914 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
916 return FILTER_HANDLED;
920 #if __FreeBSD_version >= 700000
921 /*********************************************************************
923 * MSI TX Interrupt Service routine
925 **********************************************************************/
928 ixgbe_msix_tx(void *arg)
930 struct tx_ring *txr = arg;
931 struct adapter *adapter = txr->adapter;
932 struct ifnet *ifp = adapter->ifp;
933 uint32_t loop_cnt = MAX_INTR;
935 mtx_lock(&adapter->tx_mtx);
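/*
 * Bound the work done per interrupt: ixgbe_txeof() is called at most
 * MAX_INTR times, and the loop ends early once it reports no more
 * completed descriptors to clean.
 */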
937 while (loop_cnt > 0) {
938 if (__predict_false(!ixgbe_txeof(txr)))
943 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
944 ifp->if_snd.ifq_head != NULL)
945 ixgbe_start_locked(ifp);
946 ixgbe_enable_intr(adapter);
947 mtx_unlock(&adapter->tx_mtx);
951 /*********************************************************************
953 * MSI RX Interrupt Service routine
955 **********************************************************************/
958 ixgbe_msix_rx(void *arg)
960 struct rx_ring *rxr = arg;
961 struct adapter *adapter = rxr->adapter;
962 struct ifnet *ifp = adapter->ifp;
963 uint32_t loop = MAX_INTR;
966 while ((loop-- > 0) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
967 ixgbe_rxeof(rxr, adapter->rx_process_limit);
969 ixgbe_enable_intr(adapter);
973 ixgbe_msix_link(void *arg)
975 struct adapter *adapter = arg;
978 mtx_lock(&adapter->core_mtx);
980 reg_eicr = IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
982 if (reg_eicr & IXGBE_EICR_LSC) {
983 callout_stop(&adapter->timer);
984 ixgbe_update_link_status(adapter);
985 callout_reset(&adapter->timer, hz,
986 ixgbe_local_timer, adapter);
989 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
990 ixgbe_enable_intr(adapter);
991 mtx_unlock(&adapter->core_mtx);
993 #endif /* __FreeBSD_version >= 700000 */
995 /*********************************************************************
997 * Media Ioctl callback
999 * This routine is called whenever the user queries the status of
1000 * the interface using ifconfig.
1002 **********************************************************************/
1004 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1006 struct adapter *adapter = ifp->if_softc;
1008 INIT_DEBUGOUT("ixgbe_media_status: begin");
1009 ixgbe_update_link_status(adapter);
1011 ifmr->ifm_status = IFM_AVALID;
1012 ifmr->ifm_active = IFM_ETHER;
1014 if (!adapter->link_active)
1017 ifmr->ifm_status |= IFM_ACTIVE;
1018 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1023 /*********************************************************************
1025 * Media Ioctl callback
1027 * This routine is called when the user changes speed/duplex using
1028 * media/mediaopt options with ifconfig.
1030 **********************************************************************/
1032 ixgbe_media_change(struct ifnet * ifp)
1034 struct adapter *adapter = ifp->if_softc;
1035 struct ifmedia *ifm = &adapter->media;
1037 INIT_DEBUGOUT("ixgbe_media_change: begin");
1039 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1045 /*********************************************************************
1047 * This routine maps the mbufs to tx descriptors.
1048 * WARNING: while this code is using an MQ style infrastructure,
1049 * it would NOT work as is with more than 1 queue.
1051 * return 0 on success, positive on failure
1052 **********************************************************************/
1055 ixgbe_encap(struct adapter *adapter, struct mbuf **m_headp)
1057 u32 olinfo_status = 0, cmd_type_len = 0;
1059 int i, j, error, nsegs;
1060 int first, last = 0;
1061 struct mbuf *m_head;
1062 bus_dma_segment_t segs[IXGBE_MAX_SCATTER];
1064 struct tx_ring *txr = adapter->tx_rings;
1065 struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
1066 union ixgbe_adv_tx_desc *txd = NULL;
1071 /* Basic descriptor defines */
1072 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
1073 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
1075 if (m_head->m_flags & M_VLANTAG)
1076 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1079 * Force a cleanup if number of TX descriptors
1080 * available is below the threshold. If it fails
1081 * to get above, then abort transmit.
1083 if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
1085 /* Make sure things have improved */
1086 if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
1087 adapter->no_tx_desc_avail1++;
1093 * Important to capture the first descriptor
1094 * used because it will contain the index of
1095 * the one we tell the hardware to report back
1097 first = txr->next_avail_tx_desc;
1098 txbuf = &txr->tx_buffers[first];
1099 txbuf_mapped = txbuf;
1103 * Map the packet for DMA.
1105 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1106 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
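/*
 * Error-handling sketch for the load above: EFBIG means the mbuf chain
 * has too many segments, so it is compacted with m_defrag() and the
 * mapping is retried once; ENOMEM or any other failure bumps the
 * no_tx_dma_setup counter and the transmit is aborted.
 */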
1108 if (error == EFBIG) {
1111 m = m_defrag(*m_headp, M_DONTWAIT);
1113 adapter->mbuf_alloc_failed++;
1121 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1122 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1124 if (error == ENOMEM) {
1125 adapter->no_tx_dma_setup++;
1127 } else if (error != 0) {
1128 adapter->no_tx_dma_setup++;
1133 } else if (error == ENOMEM) {
1134 adapter->no_tx_dma_setup++;
1136 } else if (error != 0) {
1137 adapter->no_tx_dma_setup++;
1143 /* Make certain there are enough descriptors */
1144 if (nsegs > txr->tx_avail - 2) {
1145 adapter->no_tx_desc_avail2++;
1152 ** Set the appropriate offload context:
1153 ** this becomes the first descriptor of the packet.
1156 if (ixgbe_tso_setup(txr, m_head, &paylen)) {
1157 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1158 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1159 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1160 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1162 } else if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
1163 if (ixgbe_tx_csum_setup(txr, m_head))
1164 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1167 i = txr->next_avail_tx_desc;
1168 for (j = 0; j < nsegs; j++) {
1172 txbuf = &txr->tx_buffers[i];
1173 txd = &txr->tx_base[i];
1174 seglen = segs[j].ds_len;
1175 segaddr = htole64(segs[j].ds_addr);
1177 txd->read.buffer_addr = segaddr;
1178 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1179 cmd_type_len |seglen);
1180 txd->read.olinfo_status = htole32(olinfo_status);
1181 last = i; /* Next descriptor that will get completed */
1183 if (++i == adapter->num_tx_desc)
1186 txbuf->m_head = NULL;
1187 txbuf->next_eop = -1;
1189 ** we have to do this inside the loop right now
1190 ** because of the hardware workaround.
1192 if (j == (nsegs -1)) /* Last descriptor gets EOP and RS */
1193 txd->read.cmd_type_len |=
1194 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1195 #ifndef NO_82598_A0_SUPPORT
1196 if (adapter->hw.revision_id == 0)
1201 txr->tx_avail -= nsegs;
1202 txr->next_avail_tx_desc = i;
1204 txbuf->m_head = m_head;
1206 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1208 /* Set the index of the descriptor that will be marked done */
1209 txbuf = &txr->tx_buffers[first];
1210 txbuf->next_eop = last;
1212 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1213 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1215 * Advance the Transmit Descriptor Tail (TDT); this tells the
1216 * hardware that this frame is available to transmit.
1218 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
1222 bus_dmamap_unload(txr->txtag, txbuf->map);
1228 ixgbe_set_promisc(struct adapter *adapter)
1232 struct ifnet *ifp = adapter->ifp;
1234 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1236 if (ifp->if_flags & IFF_PROMISC) {
1237 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1238 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1239 } else if (ifp->if_flags & IFF_ALLMULTI) {
1240 reg_rctl |= IXGBE_FCTRL_MPE;
1241 reg_rctl &= ~IXGBE_FCTRL_UPE;
1242 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1248 ixgbe_disable_promisc(struct adapter * adapter)
1252 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1254 reg_rctl &= (~IXGBE_FCTRL_UPE);
1255 reg_rctl &= (~IXGBE_FCTRL_MPE);
1256 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1262 /*********************************************************************
1265 * This routine is called whenever the multicast address list is updated.
1267 **********************************************************************/
1268 #define IXGBE_RAR_ENTRIES 16
1271 ixgbe_set_multi(struct adapter *adapter)
1274 uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1275 struct ifmultiaddr *ifma;
1277 struct ifnet *ifp = adapter->ifp;
1279 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1281 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1282 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1283 if (ifp->if_flags & IFF_PROMISC)
1284 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1285 else if (ifp->if_flags & IFF_ALLMULTI) {
1286 fctrl |= IXGBE_FCTRL_MPE;
1287 fctrl &= ~IXGBE_FCTRL_UPE;
1289 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1291 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1294 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1295 if (ifma->ifma_addr->sa_family != AF_LINK)
1297 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1298 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1299 IXGBE_ETH_LENGTH_OF_ADDRESS);
1302 IF_ADDR_UNLOCK(ifp);
1304 ixgbe_update_mc_addr_list(&adapter->hw, mta, mcnt, 0);
1310 /*********************************************************************
1313 * This routine checks for link status, updates statistics,
1314 * and runs the watchdog timer.
1316 **********************************************************************/
1319 ixgbe_local_timer(void *arg)
1321 struct adapter *adapter = arg;
1322 struct ifnet *ifp = adapter->ifp;
1324 mtx_assert(&adapter->core_mtx, MA_OWNED);
1326 ixgbe_update_link_status(adapter);
1327 ixgbe_update_stats_counters(adapter);
1328 if (ixgbe_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1329 ixgbe_print_hw_stats(adapter);
1332 * Each second we check the watchdog
1333 * to protect against hardware hangs.
1335 ixgbe_watchdog(adapter);
1337 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1341 ixgbe_update_link_status(struct adapter *adapter)
1343 uint32_t link_speed;
1344 boolean_t link_up = FALSE;
1345 struct ifnet *ifp = adapter->ifp;
1346 device_t dev = adapter->dev;
1348 ixgbe_check_link(&adapter->hw, &link_speed, &link_up);
1351 if (adapter->link_active == FALSE) {
1353 device_printf(dev,"Link is up %d Mbps %s \n",
1354 10000, "Full Duplex");
1355 adapter->link_active = TRUE;
1356 if_link_state_change(ifp, LINK_STATE_UP);
1358 } else { /* Link down */
1359 if (adapter->link_active == TRUE) {
1361 device_printf(dev,"Link is Down\n");
1362 if_link_state_change(ifp, LINK_STATE_DOWN);
1363 adapter->link_active = FALSE;
1372 /*********************************************************************
1374 * This routine disables all traffic on the adapter by issuing a
1375 * global reset on the MAC and deallocates TX/RX buffers.
1377 **********************************************************************/
1380 ixgbe_stop(void *arg)
1383 struct adapter *adapter = arg;
1386 mtx_assert(&adapter->core_mtx, MA_OWNED);
1388 INIT_DEBUGOUT("ixgbe_stop: begin\n");
1389 ixgbe_disable_intr(adapter);
1391 /* Tell the stack that the interface is no longer active */
1392 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1394 ixgbe_reset_hw(&adapter->hw);
1395 adapter->hw.adapter_stopped = FALSE;
1396 ixgbe_stop_adapter(&adapter->hw);
1397 callout_stop(&adapter->timer);
1399 /* reprogram the RAR[0] in case user changed it. */
1400 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1406 /*********************************************************************
1408 * Determine hardware revision.
1410 **********************************************************************/
1412 ixgbe_identify_hardware(struct adapter *adapter)
1414 device_t dev = adapter->dev;
1416 /* Save off the information about this board */
1417 adapter->hw.vendor_id = pci_get_vendor(dev);
1418 adapter->hw.device_id = pci_get_device(dev);
1419 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1420 adapter->hw.subsystem_vendor_id =
1421 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1422 adapter->hw.subsystem_device_id =
1423 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1428 #if __FreeBSD_version >= 700000
1429 /*********************************************************************
1431 * Setup MSIX: this is a prereq for doing Multiqueue/RSS.
1433 **********************************************************************/
1435 ixgbe_setup_msix(struct adapter *adapter)
1437 device_t dev = adapter->dev;
1438 struct tx_ring *txr = adapter->tx_rings;
1439 struct rx_ring *rxr = adapter->rx_rings;
1440 int error, vector = 0;
1442 /* TX setup: the code here supports multiple TX queues,
1443 but other parts of the driver are not yet ready for it */
1444 for (int i = 0; i < adapter->num_tx_queues; i++, vector++, txr++) {
1445 adapter->res[vector] = bus_alloc_resource_any(dev,
1446 SYS_RES_IRQ, &adapter->rid[vector],
1447 RF_SHAREABLE | RF_ACTIVE);
1448 if (!adapter->res[vector]) {
1449 device_printf(dev,"Unable to allocate"
1450 " bus resource: tx interrupt [%d]\n", vector);
1453 /* Set the handler function */
1454 error = bus_setup_intr(dev, adapter->res[vector],
1455 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1456 ixgbe_msix_tx, txr, &adapter->tag[vector]);
1458 adapter->res[vector] = NULL;
1459 device_printf(dev, "Failed to register TX handler");
1466 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rxr++) {
1467 adapter->res[vector] = bus_alloc_resource_any(dev,
1468 SYS_RES_IRQ, &adapter->rid[vector],
1469 RF_SHAREABLE | RF_ACTIVE);
1470 if (!adapter->res[vector]) {
1471 device_printf(dev,"Unable to allocate"
1472 " bus resource: rx interrupt [%d],"
1473 "rid = %d\n", i, adapter->rid[vector]);
1476 /* Set the handler function */
1477 error = bus_setup_intr(dev, adapter->res[vector],
1478 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_rx,
1479 rxr, &adapter->tag[vector]);
1481 adapter->res[vector] = NULL;
1482 device_printf(dev, "Failed to register RX handler");
1488 /* Now for Link changes */
1489 adapter->res[vector] = bus_alloc_resource_any(dev,
1490 SYS_RES_IRQ, &adapter->rid[vector], RF_SHAREABLE | RF_ACTIVE);
1491 if (!adapter->res[vector]) {
1492 device_printf(dev,"Unable to allocate"
1493 " bus resource: Link interrupt [%d]\n", adapter->rid[vector]);
1496 /* Set the link handler function */
1497 error = bus_setup_intr(dev, adapter->res[vector],
1498 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_link,
1499 adapter, &adapter->tag[vector]);
1501 adapter->res[vector] = NULL;
1502 device_printf(dev, "Failed to register LINK handler");
1512 ixgbe_allocate_pci_resources(struct adapter *adapter)
1515 device_t dev = adapter->dev;
1518 adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1521 if (!(adapter->res_memory)) {
1522 device_printf(dev,"Unable to allocate bus resource: memory\n");
1526 adapter->osdep.mem_bus_space_tag =
1527 rman_get_bustag(adapter->res_memory);
1528 adapter->osdep.mem_bus_space_handle =
1529 rman_get_bushandle(adapter->res_memory);
1530 adapter->hw.hw_addr = (uint8_t *) &adapter->osdep.mem_bus_space_handle;
1533 * First try to set up MSI/X interrupts;
1534 * if that fails, fall back to Legacy.
1536 if (ixgbe_allocate_msix_resources(adapter)) {
1539 adapter->num_tx_queues = 1;
1540 adapter->num_rx_queues = 1;
1543 #if __FreeBSD_version >= 700000
1544 /* Attempt to use MSI */
1545 val = pci_msi_count(dev);
1546 if ((val) && pci_alloc_msi(dev, &val) == 0) {
1547 adapter->rid[0] = 1;
1548 device_printf(dev, "MSI Interrupts enabled\n");
1552 adapter->rid[0] = 0;
1553 device_printf(dev, "Legacy Interrupts enabled\n");
1555 adapter->res[0] = bus_alloc_resource_any(dev,
1556 SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
1557 if (adapter->res[0] == NULL) {
1558 device_printf(dev, "Unable to allocate bus "
1559 "resource: interrupt\n");
1562 /* Set the handler contexts */
1563 TASK_INIT(&adapter->rxtx_task, 0, ixgbe_rxtx, adapter);
1564 TASK_INIT(&adapter->link_task, 0, ixgbe_link, adapter);
1565 adapter->tq = taskqueue_create_fast("ix_taskq", M_NOWAIT,
1566 taskqueue_thread_enqueue, &adapter->tq);
1567 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
1568 device_get_nameunit(adapter->dev));
1569 #if __FreeBSD_version < 700000
1570 error = bus_setup_intr(dev, adapter->res[0],
1571 INTR_TYPE_NET | INTR_FAST, ixgbe_fast_irq,
1573 error = bus_setup_intr(dev, adapter->res[0],
1574 INTR_TYPE_NET, ixgbe_fast_irq, NULL,
1576 adapter, &adapter->tag[0]);
1578 adapter->res[0] = NULL;
1579 device_printf(dev, "Failed to register"
1580 " Fast Legacy handler");
1585 adapter->hw.back = &adapter->osdep;
1589 #if __FreeBSD_version >= 700000
1591 * Attempt to configure MSI/X, the preferred interrupt mechanism.
1595 ixgbe_allocate_msix_resources(struct adapter *adapter)
1597 int error, val, want, rid;
1598 device_t dev = adapter->dev;
1602 /* First map the MSIX table */
1604 adapter->res_msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1606 if (!adapter->res_msix) {
1607 device_printf(dev,"Unable to map MSIX table \n");
1611 /* Now figure out how many vectors we need to use */
1612 val = pci_msix_count(dev);
1614 /* check configured values */
1615 want = ixgbe_tx_queues + ixgbe_rx_queues + ixgbe_other_queues;
1617 * We aren't going to do anything fancy for now;
1618 * we either meet the desired config or we fail.
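* For example, with the default tunables above (ixgbe_tx_queues = 1,
* ixgbe_rx_queues = 8, ixgbe_other_queues = 1), want comes out to 10
* MSI-X vectors.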
1625 /* Initialize the resource arrays */
1626 for (int i = 0; i < IXGBE_MSGS; i++, vector++) {
1627 adapter->rid[i] = vector;
1628 adapter->tag[i] = NULL;
1629 adapter->res[i] = NULL;
1632 adapter->num_tx_queues = ixgbe_tx_queues;
1633 adapter->num_rx_queues = ixgbe_rx_queues;
1635 /* Now allocate the vectors */
1636 if ((error = pci_alloc_msix(dev, &val)) == 0) {
1639 "MSI/X enabled with %d vectors\n", val);
1642 "FAIL pci_alloc_msix() %d\n", error);
1647 #else /* FreeBSD 6.2 */
1649 ixgbe_allocate_msix_resources(struct adapter *adapter)
1651 return (1); /* Force Legacy behavior for 6.2 */
1656 ixgbe_free_pci_resources(struct adapter * adapter)
1658 device_t dev = adapter->dev;
1662 * Legacy has this set to 0, but we need
1663 * to run this once, so reset it.
1666 loop = adapter->msix;
1670 * First release all the interrupt resources:
1671 * notice that since these are just kept
1672 * in an array we can do the same logic
1673 * whether it's MSIX or just legacy.
1675 for (i = 0; i < loop; i++) {
1676 if (adapter->tag[i] != NULL) {
1677 bus_teardown_intr(dev, adapter->res[i],
1679 adapter->tag[i] = NULL;
1681 if (adapter->res[i] != NULL) {
1682 bus_release_resource(dev, SYS_RES_IRQ,
1683 adapter->rid[i], adapter->res[i]);
1687 #if __FreeBSD_version >= 700000
1688 pci_release_msi(dev);
1690 if (adapter->res_memory != NULL)
1691 bus_release_resource(dev, SYS_RES_MEMORY,
1692 IXGBE_MMBA, adapter->res_memory);
1697 /*********************************************************************
1699 * Initialize the hardware to a configuration as specified by the
1700 * adapter structure. The controller is reset, the EEPROM is
1701 * verified, the MAC address is set, then the shared initialization
1702 * routines are called.
1704 **********************************************************************/
1706 ixgbe_hardware_init(struct adapter *adapter)
1708 device_t dev = adapter->dev;
1712 /* Issue a global reset */
1713 adapter->hw.adapter_stopped = FALSE;
1714 ixgbe_stop_adapter(&adapter->hw);
1716 /* Make sure we have a good EEPROM before we read from it */
1717 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
1718 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
1722 /* Get Hardware Flow Control setting */
1723 adapter->hw.fc.original_type = ixgbe_fc_full;
1724 adapter->hw.fc.pause_time = IXGBE_FC_PAUSE;
1725 adapter->hw.fc.low_water = IXGBE_FC_LO;
1726 adapter->hw.fc.high_water = IXGBE_FC_HI;
1727 adapter->hw.fc.send_xon = TRUE;
1729 if (ixgbe_init_hw(&adapter->hw)) {
1730 device_printf(dev,"Hardware Initialization Failed");
1737 /*********************************************************************
1739 * Setup networking device structure and register an interface.
1741 **********************************************************************/
1743 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
1746 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1748 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1750 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1751 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1752 ifp->if_mtu = ETHERMTU;
1753 ifp->if_baudrate = 1000000000;
1754 ifp->if_init = ixgbe_init;
1755 ifp->if_softc = adapter;
1756 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1757 ifp->if_ioctl = ixgbe_ioctl;
1758 ifp->if_start = ixgbe_start;
1760 ifp->if_watchdog = NULL;
1761 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1763 ether_ifattach(ifp, adapter->hw.mac.addr);
1765 adapter->max_frame_size =
1766 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1769 * Tell the upper layer(s) we support long frames.
1771 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1773 if (adapter->msix) /* RSS and HWCSUM not compatible */
1774 ifp->if_capabilities |= IFCAP_TSO4;
1776 ifp->if_capabilities |= (IFCAP_HWCSUM | IFCAP_TSO4);
1777 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1778 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1780 ifp->if_capenable = ifp->if_capabilities;
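/*
 * All capabilities start out enabled; illustratively, individual ones can
 * later be toggled from userland (e.g. "ifconfig ix0 -txcsum" or
 * "ifconfig ix0 tso"), which lands in the SIOCSIFCAP case of
 * ixgbe_ioctl() above.
 */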
1783 * Specify the media types supported by this adapter and register
1784 * callbacks to update media and link information
1786 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1787 ixgbe_media_status);
1788 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR |
1790 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1791 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1796 /********************************************************************
1797 * Manage DMA'able memory.
1798 *******************************************************************/
1800 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1804 *(bus_addr_t *) arg = segs->ds_addr;
1809 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
1810 struct ixgbe_dma_alloc *dma, int mapflags)
1812 device_t dev = adapter->dev;
1815 r = bus_dma_tag_create(NULL, /* parent */
1816 PAGE_SIZE, 0, /* alignment, bounds */
1817 BUS_SPACE_MAXADDR, /* lowaddr */
1818 BUS_SPACE_MAXADDR, /* highaddr */
1819 NULL, NULL, /* filter, filterarg */
1822 size, /* maxsegsize */
1823 BUS_DMA_ALLOCNOW, /* flags */
1824 NULL, /* lockfunc */
1825 NULL, /* lockfuncarg */
1828 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
1832 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1833 BUS_DMA_NOWAIT, &dma->dma_map);
1835 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
1839 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1843 mapflags | BUS_DMA_NOWAIT);
1845 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
1849 dma->dma_size = size;
1852 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1854 bus_dma_tag_destroy(dma->dma_tag);
1856 dma->dma_map = NULL;
1857 dma->dma_tag = NULL;
1862 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
1864 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
1865 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1866 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1867 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1868 bus_dma_tag_destroy(dma->dma_tag);
1872 /*********************************************************************
1874 * Allocate memory for the transmit and receive rings, and then
1875 * the descriptors associated with each; called only once at attach.
1877 **********************************************************************/
1879 ixgbe_allocate_queues(struct adapter *adapter)
1881 device_t dev = adapter->dev;
1882 struct tx_ring *txr;
1883 struct rx_ring *rxr;
1884 int rsize, tsize, error = IXGBE_SUCCESS;
1885 int txconf = 0, rxconf = 0;
1887 /* First allocate the TX ring struct memory */
1888 if (!(adapter->tx_rings =
1889 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
1890 adapter->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1891 device_printf(dev, "Unable to allocate TX ring memory\n");
1895 txr = adapter->tx_rings;
1897 /* Next allocate the RX */
1898 if (!(adapter->rx_rings =
1899 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
1900 adapter->num_rx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1901 device_printf(dev, "Unable to allocate RX ring memory\n");
1905 rxr = adapter->rx_rings;
1907 tsize = roundup2(adapter->num_tx_desc *
1908 sizeof(union ixgbe_adv_tx_desc), 4096);
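/*
 * Illustrative sizing: an advanced TX descriptor is 16 bytes, so a
 * 1024-entry ring needs 16 KB; roundup2() keeps the allocation a
 * multiple of 4096 bytes.
 */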
1910 * Now set up the TX queues; txconf is needed to handle the
1911 * possibility that things fail mid-course and we need to
1912 * undo the memory allocations gracefully
1914 for (int i = 0; i < adapter->num_tx_queues; i++, txconf++) {
1915 char name_string[16];
1916 /* Set up some basics */
1917 txr = &adapter->tx_rings[i];
1918 txr->adapter = adapter;
1921 * Initialize the TX side lock
1922 * -this has to change for multi tx
1924 snprintf(name_string, sizeof(name_string), "%s:tx",
1925 device_get_nameunit(dev));
1926 mtx_init(&adapter->tx_mtx, name_string, NULL, MTX_DEF);
1928 if (ixgbe_dma_malloc(adapter, tsize,
1929 &txr->txdma, BUS_DMA_NOWAIT)) {
1931 "Unable to allocate TX Descriptor memory\n");
1935 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
1936 bzero((void *)txr->tx_base, tsize);
1938 /* Now allocate transmit buffers for the ring */
1939 if (ixgbe_allocate_transmit_buffers(txr)) {
1941 "Critical Failure setting up transmit buffers\n");
1949 * Next the RX queues...
1951 rsize = roundup2(adapter->num_rx_desc *
1952 sizeof(union ixgbe_adv_rx_desc), 4096);
1953 for (int i = 0; i < adapter->num_rx_queues; i++, rxconf++) {
1954 rxr = &adapter->rx_rings[i];
1955 /* Set up some basics */
1956 rxr->adapter = adapter;
1959 if (ixgbe_dma_malloc(adapter, rsize,
1960 &rxr->rxdma, BUS_DMA_NOWAIT)) {
1962 "Unable to allocate RxDescriptor memory\n");
1966 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
1967 bzero((void *)rxr->rx_base, rsize);
1969 /* Allocate receive buffers for the ring*/
1970 if (ixgbe_allocate_receive_buffers(rxr)) {
1972 "Critical Failure setting up receive buffers\n");
1981 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
1982 ixgbe_dma_free(adapter, &rxr->rxdma);
1984 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
1985 ixgbe_dma_free(adapter, &txr->txdma);
1986 free(adapter->rx_rings, M_DEVBUF);
1988 free(adapter->tx_rings, M_DEVBUF);
1993 /*********************************************************************
1995 * Allocate memory for tx_buffer structures. The tx_buffer stores all
1996 * the information needed to transmit a packet on the wire. This is
1997 * called only once at attach; setup is done on every reset.
1999 **********************************************************************/
2001 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2003 struct adapter *adapter = txr->adapter;
2004 device_t dev = adapter->dev;
2005 struct ixgbe_tx_buf *txbuf;
2009 * Setup DMA descriptor areas.
2011 if ((error = bus_dma_tag_create(NULL, /* parent */
2012 PAGE_SIZE, 0, /* alignment, bounds */
2013 BUS_SPACE_MAXADDR, /* lowaddr */
2014 BUS_SPACE_MAXADDR, /* highaddr */
2015 NULL, NULL, /* filter, filterarg */
2016 IXGBE_TSO_SIZE, /* maxsize */
2017 IXGBE_MAX_SCATTER, /* nsegments */
2018 PAGE_SIZE, /* maxsegsize */
2020 NULL, /* lockfunc */
2021 NULL, /* lockfuncarg */
2023 device_printf(dev,"Unable to allocate TX DMA tag\n");
2027 if (!(txr->tx_buffers =
2028 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
2029 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2030 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2035 /* Create the descriptor buffer dma maps */
2036 txbuf = txr->tx_buffers;
2037 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2038 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2040 device_printf(dev, "Unable to create TX DMA map\n");
2047 /* We free all, it handles case where we are in the middle */
2048 ixgbe_free_transmit_structures(adapter);
2052 /*********************************************************************
2054 * Initialize a transmit ring.
2056 **********************************************************************/
2058 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2060 struct adapter *adapter = txr->adapter;
2061 struct ixgbe_tx_buf *txbuf;
2064 /* Clear the old ring contents */
2065 bzero((void *)txr->tx_base,
2066 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
2068 txr->next_avail_tx_desc = 0;
2069 txr->next_tx_to_clean = 0;
2071 /* Free any existing tx buffers. */
2072 txbuf = txr->tx_buffers;
2073 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2074 if (txbuf->m_head != NULL) {
2075 bus_dmamap_sync(txr->txtag, txbuf->map,
2076 BUS_DMASYNC_POSTWRITE);
2077 bus_dmamap_unload(txr->txtag, txbuf->map);
2078 m_freem(txbuf->m_head);
2079 txbuf->m_head = NULL;
2081 /* clear the watch index */
2082 txbuf->next_eop = -1;
2085 /* Set number of descriptors available */
2086 txr->tx_avail = adapter->num_tx_desc;
2088 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2089 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2093 /*********************************************************************
2095 * Initialize all transmit rings.
2097 **********************************************************************/
2099 ixgbe_setup_transmit_structures(struct adapter *adapter)
2101 struct tx_ring *txr = adapter->tx_rings;
2103 for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
2104 ixgbe_setup_transmit_ring(txr);
2109 /*********************************************************************
2111 * Enable transmit unit.
2112 * NOTE: this will need to be changed if there are more than
2113 * one transmit queue.
2114 **********************************************************************/
2116 ixgbe_initialize_transmit_units(struct adapter *adapter)
2118 struct tx_ring *txr = adapter->tx_rings;
2119 uint64_t tdba = txr->txdma.dma_paddr;
2121 /* Setup the Base and Length of the Tx Descriptor Ring */
2123 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
2124 (tdba & 0x00000000ffffffffULL));
2125 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0), (tdba >> 32));
2126 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
2127 adapter->num_tx_desc *
2128 sizeof(struct ixgbe_legacy_tx_desc));
2130 /* Setup the HW Tx Head and Tail descriptor pointers */
2131 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
2132 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
2134 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT);
2136 /* Setup Transmit Descriptor Cmd Settings */
2137 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
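/*
 * Illustrative note (editor's sketch, not part of the original code):
 * with a hypothetical ring of 256 descriptors, the TDLEN value written
 * above would be 256 * sizeof(struct ixgbe_legacy_tx_desc) = 256 * 16 =
 * 4096 bytes, i.e. exactly one 4KB page. The ring size of 256 is an
 * assumption used only for this example.
 */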
2142 /*********************************************************************
2144 * Free all transmit rings.
2146 **********************************************************************/
2148 ixgbe_free_transmit_structures(struct adapter *adapter)
2150 struct tx_ring *txr = adapter->tx_rings;
2151 mtx_lock(&adapter->tx_mtx);
2152 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
2153 ixgbe_free_transmit_buffers(txr);
2154 ixgbe_dma_free(adapter, &txr->txdma);
2155 }
2156 mtx_unlock(&adapter->tx_mtx);
2157 mtx_destroy(&adapter->tx_mtx);
2158 free(adapter->tx_rings, M_DEVBUF);
2161 /*********************************************************************
2163 * Free transmit ring related data structures.
2165 **********************************************************************/
2167 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2169 struct adapter *adapter = txr->adapter;
2170 struct ixgbe_tx_buf *tx_buffer;
2171 int i;
2173 INIT_DEBUGOUT("free_transmit_ring: begin");
2175 if (txr->tx_buffers == NULL)
2176 return;
2178 tx_buffer = txr->tx_buffers;
2179 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2180 if (tx_buffer->m_head != NULL) {
2181 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2182 BUS_DMASYNC_POSTWRITE);
2183 bus_dmamap_unload(txr->txtag,
2184 tx_buffer->map);
2185 m_freem(tx_buffer->m_head);
2186 tx_buffer->m_head = NULL;
2187 if (tx_buffer->map != NULL) {
2188 bus_dmamap_destroy(txr->txtag,
2189 tx_buffer->map);
2190 tx_buffer->map = NULL;
2192 } else if (tx_buffer->map != NULL) {
2193 bus_dmamap_unload(txr->txtag,
2194 tx_buffer->map);
2195 bus_dmamap_destroy(txr->txtag,
2196 tx_buffer->map);
2197 tx_buffer->map = NULL;
2201 if (txr->tx_buffers != NULL) {
2202 free(txr->tx_buffers, M_DEVBUF);
2203 txr->tx_buffers = NULL;
2205 if (txr->txtag != NULL) {
2206 bus_dma_tag_destroy(txr->txtag);
2207 txr->txtag = NULL;
2208 }
2212 /*********************************************************************
2214 * Advanced Context Descriptor setup for VLAN or CSUM
2216 **********************************************************************/
2219 ixgbe_tx_csum_setup(struct tx_ring *txr, struct mbuf *mp)
2221 struct adapter *adapter = txr->adapter;
2222 struct ixgbe_adv_tx_context_desc *TXD;
2223 struct ixgbe_tx_buf *tx_buffer;
2224 uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2225 struct ether_vlan_header *eh;
2226 struct ip *ip;
2227 struct ip6_hdr *ip6;
2228 int ehdrlen, ip_hlen;
2229 u16 etype;
2230 u8 ipproto;
2231 int ctxd = txr->next_avail_tx_desc;
2232 #if __FreeBSD_version < 700000
2233 struct m_tag *mtag;
2234 #else
2235 u16 vtag = 0;
2236 #endif
2239 tx_buffer = &txr->tx_buffers[ctxd];
2240 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2242 /*
2243 ** In advanced descriptors the vlan tag must
2244 ** be placed into the descriptor itself.
2245 */
2246 #if __FreeBSD_version < 700000
2247 mtag = VLAN_OUTPUT_TAG(ifp, mp);
2248 if (mtag != NULL)
2249 vlan_macip_lens |=
2250 htole16(VLAN_TAG_VALUE(mtag)) << IXGBE_ADVTXD_VLAN_SHIFT;
2251 #else
2252 if (mp->m_flags & M_VLANTAG) {
2253 vtag = htole16(mp->m_pkthdr.ether_vtag);
2254 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2255 }
2256 #endif
2257 /*
2258 * Determine where frame payload starts.
2259 * Jump over vlan headers if already present,
2260 * helpful for QinQ too.
2261 */
2262 eh = mtod(mp, struct ether_vlan_header *);
2263 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2264 etype = ntohs(eh->evl_proto);
2265 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2267 etype = ntohs(eh->evl_encap_proto);
2268 ehdrlen = ETHER_HDR_LEN;
2271 /* Set the ether header length */
2272 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2274 switch (etype) {
2275 case ETHERTYPE_IP:
2276 ip = (struct ip *)(mp->m_data + ehdrlen);
2277 ip_hlen = ip->ip_hl << 2;
2278 if (mp->m_len < ehdrlen + ip_hlen)
2279 return FALSE; /* failure */
2280 ipproto = ip->ip_p;
2281 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2282 break;
2283 case ETHERTYPE_IPV6:
2284 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2285 ip_hlen = sizeof(struct ip6_hdr);
2286 if (mp->m_len < ehdrlen + ip_hlen)
2287 return FALSE; /* failure */
2288 ipproto = ip6->ip6_nxt;
2289 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2290 break;
2291 default:
2292 break;
2293 }
2295 vlan_macip_lens |= ip_hlen;
2296 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2298 switch (ipproto) {
2299 case IPPROTO_TCP:
2300 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
2301 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2302 break;
2303 case IPPROTO_UDP:
2304 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
2305 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2306 break;
2307 }
2309 /* Now copy bits into descriptor */
2310 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2311 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2312 TXD->seqnum_seed = htole32(0);
2313 TXD->mss_l4len_idx = htole32(0);
2315 #ifndef NO_82598_A0_SUPPORT
2316 if (adapter->hw.revision_id == 0)
2317 desc_flip(TXD);
2318 #endif
2320 tx_buffer->m_head = NULL;
2321 tx_buffer->next_eop = -1;
2323 /* We've consumed the first desc, adjust counters */
2324 if (++ctxd == adapter->num_tx_desc)
2325 ctxd = 0;
2326 txr->next_avail_tx_desc = ctxd;
2328 return TRUE;
2329 }
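/*
 * Illustrative example (editor's sketch, not part of the original code):
 * for an untagged IPv4/TCP frame with a 14-byte Ethernet header and a
 * 20-byte IP header, the fields built above would end up as
 *
 *     vlan_macip_lens = (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20;
 *     type_tucmd_mlhl = IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
 *                       IXGBE_ADVTXD_TUCMD_IPV4 | IXGBE_ADVTXD_TUCMD_L4T_TCP;
 *
 * The 14 and 20 byte header lengths are assumptions for the example only.
 */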
2332 #if __FreeBSD_version >= 700000
2333 /**********************************************************************
2335 * Setup work for hardware segmentation offload (TSO) on
2336 * adapters using advanced tx descriptors
2338 **********************************************************************/
2340 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2342 struct adapter *adapter = txr->adapter;
2343 struct ixgbe_adv_tx_context_desc *TXD;
2344 struct ixgbe_tx_buf *tx_buffer;
2345 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2346 u32 mss_l4len_idx = 0;
2348 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
2349 struct ether_vlan_header *eh;
2350 struct ip *ip;
2351 struct tcphdr *th;
2352 u16 vtag = 0;
2353 if (((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
2354 (mp->m_pkthdr.len <= IXGBE_TX_BUFFER_SIZE))
2355 return FALSE;
2357 /*
2358 * Determine where frame payload starts.
2359 * Jump over vlan headers if already present
2360 */
2361 eh = mtod(mp, struct ether_vlan_header *);
2362 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2363 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2365 ehdrlen = ETHER_HDR_LEN;
2367 /* Ensure we have at least the IP+TCP header in the first mbuf. */
2368 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2369 return FALSE;
2371 ctxd = txr->next_avail_tx_desc;
2372 tx_buffer = &txr->tx_buffers[ctxd];
2373 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2375 ip = (struct ip *)(mp->m_data + ehdrlen);
2376 if (ip->ip_p != IPPROTO_TCP)
2377 return FALSE; /* 0 */
2379 ip->ip_sum = 0;
2380 ip_hlen = ip->ip_hl << 2;
2381 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2382 th->th_sum = in_pseudo(ip->ip_src.s_addr,
2383 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2384 tcp_hlen = th->th_off << 2;
2385 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2386 /* This is used in the transmit desc in encap */
2387 *paylen = mp->m_pkthdr.len - hdrlen;
2389 /* VLAN MACLEN IPLEN */
2390 if (mp->m_flags & M_VLANTAG) {
2391 vtag = htole16(mp->m_pkthdr.ether_vtag);
2392 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2393 }
2395 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2396 vlan_macip_lens |= ip_hlen;
2397 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2399 /* ADV DTYPE TUCMD */
2400 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2401 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2402 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2403 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2407 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2408 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2409 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2411 TXD->seqnum_seed = htole32(0);
2412 tx_buffer->m_head = NULL;
2413 tx_buffer->next_eop = -1;
2415 #ifndef NO_82598_A0_SUPPORT
2416 if (adapter->hw.revision_id == 0)
2417 desc_flip(TXD);
2418 #endif
2420 if (++ctxd == adapter->num_tx_desc)
2421 ctxd = 0;
2424 txr->next_avail_tx_desc = ctxd;
2426 return TRUE;
2427 }
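/*
 * Illustrative example (editor's sketch, not part of the original code):
 * for a 1500-byte MTU with 20-byte IP and 20-byte TCP headers, the TCP
 * stack would typically hand down tso_segsz = 1460, so the field built
 * above becomes
 *
 *     mss_l4len_idx = (1460 << IXGBE_ADVTXD_MSS_SHIFT) |
 *                     (20 << IXGBE_ADVTXD_L4LEN_SHIFT);
 *
 * and *paylen is the total packet length minus the 54 bytes of
 * Ethernet + IP + TCP headers. The numeric values are assumptions for
 * the example only.
 */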
2428 #else /* For 6.2 RELEASE */
2429 /* This makes it easy to keep the code common */
2431 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2432 {
2433 return FALSE;
2434 }
2435 #endif
2437 /**********************************************************************
2439 * Examine each tx_buffer in the used queue. If the hardware is done
2440 * processing the packet then free associated resources. The
2441 * tx_buffer is put back on the free queue.
2443 **********************************************************************/
2445 ixgbe_txeof(struct tx_ring *txr)
2447 struct adapter * adapter = txr->adapter;
2448 struct ifnet *ifp = adapter->ifp;
2449 int first, last, done, num_avail;
2450 struct ixgbe_tx_buf *tx_buffer;
2451 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2453 mtx_assert(&adapter->tx_mtx, MA_OWNED);
2455 if (txr->tx_avail == adapter->num_tx_desc)
2456 return FALSE;
2458 num_avail = txr->tx_avail;
2459 first = txr->next_tx_to_clean;
2461 tx_buffer = &txr->tx_buffers[first];
2462 /* For cleanup we just use legacy struct */
2463 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2464 last = tx_buffer->next_eop;
2468 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2470 /*
2471 * What this does is get the index of the
2472 * first descriptor AFTER the EOP of the
2473 * first packet, that way we can do the
2474 * simple comparison on the inner while loop
2477 if (++last == adapter->num_tx_desc) last = 0;
2478 done = last;
2480 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2481 BUS_DMASYNC_POSTREAD);
2483 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2484 /* We clean the range of the packet */
2485 while (first != done) {
2486 tx_desc->upper.data = 0;
2487 tx_desc->lower.data = 0;
2488 tx_desc->buffer_addr = 0;
2489 num_avail++;
2491 if (tx_buffer->m_head) {
2493 bus_dmamap_sync(txr->txtag,
2494 tx_buffer->map,
2495 BUS_DMASYNC_POSTWRITE);
2496 bus_dmamap_unload(txr->txtag,
2497 tx_buffer->map);
2498 m_freem(tx_buffer->m_head);
2499 tx_buffer->m_head = NULL;
2500 tx_buffer->map = NULL;
2501 }
2502 tx_buffer->next_eop = -1;
2504 if (++first == adapter->num_tx_desc)
2505 first = 0;
2507 tx_buffer = &txr->tx_buffers[first];
2508 tx_desc =
2509 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2510 }
2511 /* See if we can continue to the next packet */
2512 last = tx_buffer->next_eop;
2513 if (last != -1) {
2514 eop_desc =
2515 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2516 /* Get new done point */
2517 if (++last == adapter->num_tx_desc) last = 0;
2518 done = last;
2519 } else
2520 break;
2521 }
2523 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2524 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2526 txr->next_tx_to_clean = first;
2528 /*
2529 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
2530 * it is OK to send packets. If there are no pending descriptors,
2531 * clear the timeout. Otherwise, if some descriptors have been freed,
2532 * restart the timeout.
2533 */
2534 if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
2535 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2536 /* If all are clean turn off the timer */
2537 if (num_avail == adapter->num_tx_desc)
2538 adapter->watchdog_timer = 0;
2539 /* Some were cleaned, so reset timer */
2540 else if (num_avail != txr->tx_avail)
2541 adapter->watchdog_timer = IXGBE_TX_TIMEOUT;
2544 txr->tx_avail = num_avail;
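/*
 * Worked example for the cleanup loop above (editor's sketch, not part
 * of the original code): assume a hypothetical 256-descriptor ring in
 * which one packet occupies descriptors 10..12, so that
 * tx_buffer[10].next_eop == 12. 'last' is advanced once past the EOP,
 * giving done == 13; once the DD bit is set in descriptor 12, the inner
 * loop frees descriptors 10, 11 and 12 and stops as soon as 'first'
 * reaches 'done' (13), which is the simple comparison the in-loop
 * comment refers to.
 */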
2548 /*********************************************************************
2550 * Get a buffer from system mbuf buffer pool.
2552 **********************************************************************/
2554 ixgbe_get_buf(struct rx_ring *rxr, int i)
2556 struct adapter *adapter = rxr->adapter;
2557 struct mbuf *mp;
2558 bus_dmamap_t map;
2559 int nsegs, error, old, s = 0;
2560 int size = MCLBYTES;
2563 bus_dma_segment_t segs[1];
2564 struct ixgbe_rx_buf *rxbuf;
2566 /* Are we going to Jumbo clusters? */
2567 if (adapter->bigbufs) {
2568 size = MJUMPAGESIZE;
2569 s = 1;
2570 }
2572 mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
2573 if (mp == NULL) {
2574 adapter->mbuf_alloc_failed++;
2575 return (ENOBUFS);
2576 }
2578 mp->m_len = mp->m_pkthdr.len = size;
2580 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2581 m_adj(mp, ETHER_ALIGN);
2583 /*
2584 * Using memory from the mbuf cluster pool, invoke the bus_dma
2585 * machinery to arrange the memory mapping.
2586 */
2587 error = bus_dmamap_load_mbuf_sg(rxr->rxtag[s], rxr->spare_map[s],
2588 mp, segs, &nsegs, BUS_DMA_NOWAIT);
2589 if (error) {
2590 m_freem(mp);
2591 return (error);
2592 }
2594 /* Now check our target buffer for existing mapping */
2595 rxbuf = &rxr->rx_buffers[i];
2596 old = rxbuf->bigbuf;
2597 if (rxbuf->m_head != NULL)
2598 bus_dmamap_unload(rxr->rxtag[old], rxbuf->map[old]);
2600 map = rxbuf->map[old];
2601 rxbuf->map[s] = rxr->spare_map[s];
2602 rxr->spare_map[old] = map;
2603 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s], BUS_DMASYNC_PREREAD);
2604 rxbuf->m_head = mp;
2605 rxbuf->bigbuf = s;
2607 rxr->rx_base[i].read.pkt_addr = htole64(segs[0].ds_addr);
2609 #ifndef NO_82598_A0_SUPPORT
2610 /* A0 needs to One's Complement descriptors */
2611 if (adapter->hw.revision_id == 0) {
2612 struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
2613 struct dhack *d;
2615 d = (struct dhack *)&rxr->rx_base[i];
2624 /*********************************************************************
2626 * Allocate memory for rx_buffer structures. Since we use one
2627 * rx_buffer per received packet, the maximum number of rx_buffer's
2628 * that we'll need is equal to the number of receive descriptors
2629 * that we've allocated.
2631 **********************************************************************/
2633 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2635 struct adapter *adapter = rxr->adapter;
2636 device_t dev = adapter->dev;
2637 struct ixgbe_rx_buf *rxbuf;
2638 int i, bsize, error;
2640 bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
2641 if (!(rxr->rx_buffers =
2642 (struct ixgbe_rx_buf *) malloc(bsize,
2643 M_DEVBUF, M_NOWAIT | M_ZERO))) {
2644 device_printf(dev, "Unable to allocate rx_buffer memory\n");
2649 /* First make the small (2K) tag/map */
2650 if ((error = bus_dma_tag_create(NULL, /* parent */
2651 PAGE_SIZE, 0, /* alignment, bounds */
2652 BUS_SPACE_MAXADDR, /* lowaddr */
2653 BUS_SPACE_MAXADDR, /* highaddr */
2654 NULL, NULL, /* filter, filterarg */
2655 MCLBYTES, /* maxsize */
2656 1, /* nsegments */
2657 MCLBYTES, /* maxsegsize */
2658 0, /* flags */
2659 NULL, /* lockfunc */
2660 NULL, /* lockfuncarg */
2661 &rxr->rxtag[0]))) {
2662 device_printf(dev, "Unable to create RX Small DMA tag\n");
2666 /* Next make the large (4K) tag/map */
2667 if ((error = bus_dma_tag_create(NULL, /* parent */
2668 PAGE_SIZE, 0, /* alignment, bounds */
2669 BUS_SPACE_MAXADDR, /* lowaddr */
2670 BUS_SPACE_MAXADDR, /* highaddr */
2671 NULL, NULL, /* filter, filterarg */
2672 MJUMPAGESIZE, /* maxsize */
2673 1, /* nsegments */
2674 MJUMPAGESIZE, /* maxsegsize */
2675 0, /* flags */
2676 NULL, /* lockfunc */
2677 NULL, /* lockfuncarg */
2678 &rxr->rxtag[1]))) {
2679 device_printf(dev, "Unable to create RX Large DMA tag\n");
2683 /* Create the spare maps (used by getbuf) */
2684 error = bus_dmamap_create(rxr->rxtag[0], BUS_DMA_NOWAIT,
2685 &rxr->spare_map[0]);
2686 error = bus_dmamap_create(rxr->rxtag[1], BUS_DMA_NOWAIT,
2687 &rxr->spare_map[1]);
2688 if (error) {
2689 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
2690 __func__, error);
2691 goto fail;
2692 }
2694 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
2695 rxbuf = &rxr->rx_buffers[i];
2696 error = bus_dmamap_create(rxr->rxtag[0],
2697 BUS_DMA_NOWAIT, &rxbuf->map[0]);
2699 device_printf(dev, "Unable to create Small RX DMA map\n");
2702 error = bus_dmamap_create(rxr->rxtag[1],
2703 BUS_DMA_NOWAIT, &rxbuf->map[1]);
2705 device_printf(dev, "Unable to create Large RX DMA map\n");
2711 return (0);
2712 fail:
2713 /* Frees all, but can handle partial completion */
2714 ixgbe_free_receive_structures(adapter);
2715 return (error);
2718 /*********************************************************************
2720 * Initialize a receive ring and its buffers.
2722 **********************************************************************/
2724 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2726 struct adapter *adapter;
2727 struct ixgbe_rx_buf *rxbuf;
2728 int j, rsize, s = 0;
2730 adapter = rxr->adapter;
2731 rsize = roundup2(adapter->num_rx_desc *
2732 sizeof(union ixgbe_adv_rx_desc), 4096);
2733 /* Clear the ring contents */
2734 bzero((void *)rxr->rx_base, rsize);
2736 /*
2737 ** Free current RX buffers: the size of the buffer
2738 ** that is loaded is indicated by the bigbuf flag
2739 ** of each rx_buf entry, use it to select the matching tag/map.
2740 */
2741 for (int i = 0; i < adapter->num_rx_desc; i++) {
2742 rxbuf = &rxr->rx_buffers[i];
2743 s = rxbuf->bigbuf;
2744 if (rxbuf->m_head != NULL) {
2745 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
2746 BUS_DMASYNC_POSTREAD);
2747 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2748 m_freem(rxbuf->m_head);
2749 rxbuf->m_head = NULL;
2753 for (j = 0; j < adapter->num_rx_desc; j++) {
2754 if (ixgbe_get_buf(rxr, j) == ENOBUFS) {
2755 rxr->rx_buffers[j].m_head = NULL;
2756 rxr->rx_base[j].read.pkt_addr = 0;
2757 /* If we fail, some buffers may have changed size */
2758 s = adapter->bigbufs;
2763 /* Setup our descriptor indices */
2764 rxr->next_to_check = 0;
2765 rxr->last_cleaned = 0;
2767 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2768 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2769 return (0);
2771 fail:
2772 /*
2773 * We need to clean up any buffers allocated so far,
2774 * 'j' is the failing index, decrement it to get the
2775 * last buffer that was successfully set up.
2776 */
2777 for (--j; j >= 0; j--) {
2778 rxbuf = &rxr->rx_buffers[j];
2779 if (rxbuf->m_head != NULL) {
2780 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
2781 BUS_DMASYNC_POSTREAD);
2782 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2783 m_freem(rxbuf->m_head);
2784 rxbuf->m_head = NULL;
2790 /*********************************************************************
2792 * Initialize all receive rings.
2794 **********************************************************************/
2796 ixgbe_setup_receive_structures(struct adapter *adapter)
2798 struct rx_ring *rxr = adapter->rx_rings;
2799 int i, j, s;
2801 for (i = 0; i < adapter->num_rx_queues; i++, rxr++)
2802 if (ixgbe_setup_receive_ring(rxr))
2803 goto fail;
2805 return (0);
2806 fail:
2807 /*
2808 * Free RX buffers allocated so far, we will only handle
2809 * the rings that completed, the failing case will have
2810 * cleaned up for itself. The value of 'i' will be the
2811 * failed ring so we must pre-decrement it.
2812 */
2813 rxr = adapter->rx_rings;
2814 for (--i; i >= 0; i--, rxr++) {
2815 for (j = 0; j < adapter->num_rx_desc; j++) {
2816 struct ixgbe_rx_buf *rxbuf;
2817 rxbuf = &rxr->rx_buffers[j];
2818 s = rxbuf->bigbuf;
2819 if (rxbuf->m_head != NULL) {
2820 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
2821 BUS_DMASYNC_POSTREAD);
2822 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2823 m_freem(rxbuf->m_head);
2824 rxbuf->m_head = NULL;
2832 /*********************************************************************
2834 * Enable receive unit.
2836 **********************************************************************/
2838 ixgbe_initialize_receive_units(struct adapter *adapter)
2840 struct rx_ring *rxr = adapter->rx_rings;
2841 struct ifnet *ifp = adapter->ifp;
2842 u32 rxctrl, fctrl, srrctl, rxcsum;
2843 u32 reta, mrqc, hlreg, linkvec;
2844 u32 random[10];
2847 /*
2848 * Make sure receives are disabled while
2849 * setting up the descriptor ring
2850 */
2851 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
2852 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
2853 rxctrl & ~IXGBE_RXCTRL_RXEN);
2855 /* Enable broadcasts */
2856 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2857 fctrl |= IXGBE_FCTRL_BAM;
2858 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2860 hlreg = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
2861 if (ifp->if_mtu > ETHERMTU)
2862 hlreg |= IXGBE_HLREG0_JUMBOEN;
2864 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2865 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, hlreg);
2867 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
2868 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2869 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2870 if (adapter->bigbufs)
2871 srrctl |= 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2873 srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2874 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2875 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
2877 /* Set Queue moderation rate */
2878 for (int i = 0; i < IXGBE_MSGS; i++)
2879 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(i), DEFAULT_ITR);
2881 /* Set Link moderation lower */
2882 linkvec = adapter->num_tx_queues + adapter->num_rx_queues;
2883 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(linkvec), LINK_ITR);
2885 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
2886 u64 rdba = rxr->rxdma.dma_paddr;
2887 /* Setup the Base and Length of the Rx Descriptor Ring */
2888 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(i),
2889 (rdba & 0x00000000ffffffffULL));
2890 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(i), (rdba >> 32));
2891 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(i),
2892 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2894 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2895 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(i), 0);
2896 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(i),
2897 adapter->num_rx_desc - 1);
2900 if (adapter->num_rx_queues > 1) {
2901 /* set up random bits */
2902 arc4rand(&random, sizeof(random), 0);
2903 switch (adapter->num_rx_queues) {
2915 /* Set up the redirection table */
2916 for (int i = 0; i < 32; i++) {
2917 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RETA(i), reta);
2918 if (adapter->num_rx_queues > 4) {
2919 ++i;
2920 IXGBE_WRITE_REG(&adapter->hw,
2921 IXGBE_RETA(i), 0x04050607);
2925 /* Now fill our hash function seeds */
2926 for (int i = 0; i < 10; i++)
2927 IXGBE_WRITE_REG_ARRAY(&adapter->hw,
2928 IXGBE_RSSRK(0), i, random[i]);
2930 mrqc = IXGBE_MRQC_RSSEN
2931 /* Perform hash on these packet types */
2932 | IXGBE_MRQC_RSS_FIELD_IPV4
2933 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2934 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2935 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2936 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2937 | IXGBE_MRQC_RSS_FIELD_IPV6
2938 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2939 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2940 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2941 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MRQC, mrqc);
2943 /* RSS and RX IPP Checksum are mutually exclusive */
2944 rxcsum = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCSUM);
2945 rxcsum |= IXGBE_RXCSUM_PCSD;
2946 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCSUM, rxcsum);
2947 } else {
2948 rxcsum = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCSUM);
2949 if (ifp->if_capenable & IFCAP_RXCSUM)
2950 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2951 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCSUM, rxcsum);
2952 }
2954 /* Enable Receive engine */
2955 rxctrl |= (IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS);
2956 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rxctrl);
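/*
 * Note on the SRRCTL programming above (editor's sketch, not part of the
 * original code): assuming the conventional definition of
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT as 10, the packet buffer size field is
 * expressed in 1KB units, so 2048 >> 10 == 2 is written for standard
 * clusters and 4096 >> 10 == 4 when the larger (MJUMPAGESIZE) buffers
 * are in use.
 */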
2961 /*********************************************************************
2963 * Free all receive rings.
2965 **********************************************************************/
2967 ixgbe_free_receive_structures(struct adapter *adapter)
2969 struct rx_ring *rxr = adapter->rx_rings;
2971 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
2972 ixgbe_free_receive_buffers(rxr);
2973 /* Free the ring memory as well */
2974 ixgbe_dma_free(adapter, &rxr->rxdma);
2975 }
2977 free(adapter->rx_rings, M_DEVBUF);
2980 /*********************************************************************
2982 * Free receive ring data structures
2984 **********************************************************************/
2986 ixgbe_free_receive_buffers(struct rx_ring *rxr)
2988 struct adapter *adapter = NULL;
2989 struct ixgbe_rx_buf *rxbuf = NULL;
2991 INIT_DEBUGOUT("free_receive_buffers: begin");
2992 adapter = rxr->adapter;
2993 if (rxr->rx_buffers != NULL) {
2994 rxbuf = &rxr->rx_buffers[0];
2995 for (int i = 0; i < adapter->num_rx_desc; i++) {
2996 int s = rxbuf->bigbuf;
2997 if (rxbuf->map != NULL) {
2998 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2999 bus_dmamap_destroy(rxr->rxtag[s], rxbuf->map[s]);
3000 }
3001 if (rxbuf->m_head != NULL) {
3002 m_freem(rxbuf->m_head);
3003 }
3004 rxbuf->m_head = NULL;
3005 rxbuf++;
3008 if (rxr->rx_buffers != NULL) {
3009 free(rxr->rx_buffers, M_DEVBUF);
3010 rxr->rx_buffers = NULL;
3012 for (int s = 0; s < 2; s++) {
3013 if (rxr->rxtag[s] != NULL) {
3014 bus_dma_tag_destroy(rxr->rxtag[s]);
3015 rxr->rxtag[s] = NULL;
3021 /*********************************************************************
3023 * This routine executes in interrupt context. It replenishes
3024 * the mbufs in the descriptor and sends data which has been
3025 * dma'ed into host memory to upper layer.
3027 * We loop at most count times if count is > 0, or until done if
3028 * count is < 0.
3030 *********************************************************************/
3032 ixgbe_rxeof(struct rx_ring *rxr, int count)
3034 struct adapter *adapter = rxr->adapter;
3035 struct ifnet *ifp = adapter->ifp;
3036 struct mbuf *mp;
3037 int len, i, eop = 0;
3038 uint8_t accept_frame = 0;
3040 union ixgbe_adv_rx_desc *cur;
3041 u32 staterr;
3042 int s;
3043 i = rxr->next_to_check;
3044 cur = &rxr->rx_base[i];
3045 staterr = cur->wb.upper.status_error;
3047 if (!(staterr & IXGBE_RXD_STAT_DD))
3048 return FALSE;
3050 while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
3051 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3052 struct mbuf *m = NULL;
3055 mp = rxr->rx_buffers[i].m_head;
3056 s = rxr->rx_buffers[i].bigbuf;
3057 bus_dmamap_sync(rxr->rxtag[s], rxr->rx_buffers[i].map[s],
3058 BUS_DMASYNC_POSTREAD);
3060 if (staterr & IXGBE_RXD_STAT_EOP) {
3066 len = cur->wb.upper.length;
3068 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
3072 /* Get a fresh buffer first */
3073 if (ixgbe_get_buf(rxr, i) != 0) {
3078 /* Assign correct length to the current fragment */
3079 mp->m_len = len;
3081 if (rxr->fmp == NULL) {
3082 mp->m_pkthdr.len = len;
3083 rxr->fmp = mp; /* Store the first mbuf */
3084 rxr->lmp = mp;
3085 } else {
3086 /* Chain mbuf's together */
3087 mp->m_flags &= ~M_PKTHDR;
3088 rxr->lmp->m_next = mp;
3089 rxr->lmp = rxr->lmp->m_next;
3090 rxr->fmp->m_pkthdr.len += len;
3091 }
3093 if (eop) {
3094 rxr->fmp->m_pkthdr.rcvif = ifp;
3096 rxr->packet_count++;
3097 rxr->byte_count += rxr->fmp->m_pkthdr.len;
3099 ixgbe_rx_checksum(adapter,
3100 staterr, rxr->fmp);
3102 if (staterr & IXGBE_RXD_STAT_VP) {
3103 #if __FreeBSD_version < 700000
3104 VLAN_INPUT_TAG_NEW(ifp, rxr->fmp,
3105 (le16toh(cur->wb.upper.vlan) &
3106 IXGBE_RX_DESC_SPECIAL_VLAN_MASK));
3107 #else
3108 rxr->fmp->m_pkthdr.ether_vtag =
3109 le16toh(cur->wb.upper.vlan);
3110 rxr->fmp->m_flags |= M_VLANTAG;
3111 #endif
3120 /* Reuse loaded DMA map and just update mbuf chain */
3121 mp = rxr->rx_buffers[i].m_head;
3122 mp->m_len = mp->m_pkthdr.len =
3123 (rxr->rx_buffers[i].bigbuf ? MJUMPAGESIZE:MCLBYTES);
3124 mp->m_data = mp->m_ext.ext_buf;
3126 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3127 m_adj(mp, ETHER_ALIGN);
3128 if (rxr->fmp != NULL) {
3129 m_freem(rxr->fmp);
3130 rxr->fmp = NULL;
3131 rxr->lmp = NULL;
3132 }
3136 /* Zero out the receive descriptors status */
3137 cur->wb.upper.status_error = 0;
3138 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3139 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3141 rxr->last_cleaned = i; /* for updating tail */
3143 if (++i == adapter->num_rx_desc)
3144 i = 0;
3146 /* Now send up to the stack */
3147 if (m != NULL) {
3148 rxr->next_to_check = i;
3149 (*ifp->if_input)(ifp, m);
3150 i = rxr->next_to_check;
3151 }
3152 /* Get next descriptor */
3153 cur = &rxr->rx_base[i];
3154 staterr = cur->wb.upper.status_error;
3156 rxr->next_to_check = i;
3158 /* Advance the IXGB's Receive Queue "Tail Pointer" */
3159 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned);
3161 if (!(staterr & IXGBE_RXD_STAT_DD))
3162 return FALSE;
3164 return TRUE;
3165 }
3167 /*********************************************************************
3169 * Verify that the hardware indicated that the checksum is valid.
3170 * Inform the stack about the validity of the checksum so that
3171 * the stack does not spend time verifying it again.
3173 *********************************************************************/
3175 ixgbe_rx_checksum(struct adapter *adapter,
3176 uint32_t staterr, struct mbuf * mp)
3178 uint16_t status = (uint16_t) staterr;
3179 uint8_t errors = (uint8_t) (staterr >> 24);
3182 if (status & IXGBE_RXD_STAT_IXSM) {
3183 mp->m_pkthdr.csum_flags = 0;
3184 return;
3185 }
3187 if (status & IXGBE_RXD_STAT_IPCS) {
3189 if (!(errors & IXGBE_RXD_ERR_IPE)) {
3190 /* IP Checksum Good */
3191 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3192 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3193 } else
3195 mp->m_pkthdr.csum_flags = 0;
3197 if (status & IXGBE_RXD_STAT_L4CS) {
3199 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3200 mp->m_pkthdr.csum_flags |=
3201 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3202 mp->m_pkthdr.csum_data = htons(0xffff);
3210 ixgbe_enable_vlans(struct adapter *adapter)
3214 ixgbe_disable_intr(adapter);
3215 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
3216 ctrl |= IXGBE_VLNCTRL_VME;
3217 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3218 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
3219 ixgbe_enable_intr(adapter);
3226 ixgbe_enable_intr(struct adapter *adapter)
3230 /* With RSS set up what to auto clear */
3231 if (adapter->msix) {
3232 mask = IXGBE_EIMS_ENABLE_MASK;
3233 mask &= ~IXGBE_EIMS_OTHER;
3234 mask &= ~IXGBE_EIMS_LSC;
3235 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
3238 mask = IXGBE_EIMS_ENABLE_MASK;
3239 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_ENABLE_MASK);
3240 IXGBE_WRITE_FLUSH(&adapter->hw);
3246 ixgbe_disable_intr(struct adapter *adapter)
3248 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3253 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
3257 value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
3264 ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector)
3265 {
3266 u32 ivar, index;
3267 vector |= IXGBE_IVAR_ALLOC_VAL;
3268 index = (entry >> 2) & 0x1F;
3269 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
3270 ivar |= (vector << (8 * (entry & 0x3)));
3271 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3275 ixgbe_configure_ivars(struct adapter *adapter)
3279 for (i = 0, vec = 1; i < adapter->num_rx_queues; i++, vec++)
3280 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), vec);
3282 for (i = 0, vec = 8; i < adapter->num_tx_queues; i++, vec++)
3283 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), vec);
3285 /* For the Link interrupt */
3286 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0);
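/*
 * Worked example for ixgbe_set_ivar() above (editor's sketch, not part
 * of the original code): an entry value of 5 selects IVAR register index
 * (5 >> 2) & 0x1F == 1 and byte lane 5 & 0x3 == 1, so the vector (with
 * IXGBE_IVAR_ALLOC_VAL set) is written into bits 15:8 of IXGBE_IVAR(1).
 */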
3289 /**********************************************************************
3291 * Update the board statistics counters.
3293 **********************************************************************/
3295 ixgbe_update_stats_counters(struct adapter *adapter)
3296 {
3297 struct ifnet *ifp = adapter->ifp;
3298 struct ixgbe_hw *hw = &adapter->hw;
3299 u64 good_rx, missed_rx;
3301 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3303 good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC);
3304 missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0));
3305 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1));
3306 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2));
3307 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3));
3308 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4));
3309 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5));
3310 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6));
3311 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7));
3313 adapter->stats.gprc += (good_rx - missed_rx);
3315 adapter->stats.mpc[0] += missed_rx;
3316 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3317 adapter->stats.bprc += IXGBE_READ_REG(hw, IXGBE_BPRC);
3318 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3319 /*
3320 * Workaround: mprc hardware is incorrectly counting
3321 * broadcasts, so for now we subtract those.
3322 */
3323 adapter->stats.mprc -= adapter->stats.bprc;
3324 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3325 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3326 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3327 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3328 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3329 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3330 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3332 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3333 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3334 adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3335 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3336 adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3337 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3338 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3339 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3340 adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0));
3341 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3342 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3343 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3344 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3345 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3346 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3347 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3348 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3349 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3350 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3351 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3352 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3353 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3354 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3358 /* Fill out the OS statistics structure */
3359 ifp->if_ipackets = adapter->stats.gprc;
3360 ifp->if_opackets = adapter->stats.gptc;
3361 ifp->if_ibytes = adapter->stats.gorc;
3362 ifp->if_obytes = adapter->stats.gotc;
3363 ifp->if_imcasts = adapter->stats.mprc;
3364 ifp->if_collisions = 0;
3366 /* Rx Errors */
3367 ifp->if_ierrors =
3368 adapter->stats.mpc[0] +
3369 adapter->stats.crcerrs +
3370 adapter->stats.rlec;
3375 /**********************************************************************
3377 * This routine is called only when ixgbe_display_debug_stats is enabled.
3378 * This routine provides a way to take a look at important statistics
3379 * maintained by the driver and hardware.
3381 **********************************************************************/
3383 ixgbe_print_hw_stats(struct adapter * adapter)
3385 device_t dev = adapter->dev;
3388 device_printf(dev,"Tx Descriptors not Avail1 = %lu\n",
3389 adapter->no_tx_desc_avail1);
3390 device_printf(dev,"Tx Descriptors not Avail2 = %lu\n",
3391 adapter->no_tx_desc_avail2);
3392 device_printf(dev,"Std Mbuf Failed = %lu\n",
3393 adapter->mbuf_alloc_failed);
3394 device_printf(dev,"Std Cluster Failed = %lu\n",
3395 adapter->mbuf_cluster_failed);
3397 device_printf(dev,"Missed Packets = %llu\n",
3398 (long long)adapter->stats.mpc[0]);
3399 device_printf(dev,"Receive length errors = %llu\n",
3400 ((long long)adapter->stats.roc +
3401 (long long)adapter->stats.ruc));
3402 device_printf(dev,"Crc errors = %llu\n",
3403 (long long)adapter->stats.crcerrs);
3404 device_printf(dev,"Driver dropped packets = %lu\n",
3405 adapter->dropped_pkts);
3407 device_printf(dev,"XON Rcvd = %llu\n",
3408 (long long)adapter->stats.lxonrxc);
3409 device_printf(dev,"XON Xmtd = %llu\n",
3410 (long long)adapter->stats.lxontxc);
3411 device_printf(dev,"XOFF Rcvd = %llu\n",
3412 (long long)adapter->stats.lxoffrxc);
3413 device_printf(dev,"XOFF Xmtd = %llu\n",
3414 (long long)adapter->stats.lxofftxc);
3416 device_printf(dev,"Total Packets Rcvd = %llu\n",
3417 (long long)adapter->stats.tpr);
3418 device_printf(dev,"Good Packets Rcvd = %llu\n",
3419 (long long)adapter->stats.gprc);
3420 device_printf(dev,"Good Packets Xmtd = %llu\n",
3421 (long long)adapter->stats.gptc);
3422 device_printf(dev,"TSO Transmissions = %lu\n",
3428 /**********************************************************************
3430 * This routine is called only when ixgbe_display_debug_stats is enabled.
3431 * This routine provides a way to take a look at important statistics
3432 * maintained by the driver and hardware.
3434 **********************************************************************/
3436 ixgbe_print_debug_info(struct adapter *adapter)
3438 device_t dev = adapter->dev;
3439 struct rx_ring *rxr = adapter->rx_rings;
3440 struct ixgbe_hw *hw = &adapter->hw;
3441 uint8_t *hw_addr = adapter->hw.hw_addr;
3443 device_printf(dev,"Adapter hardware address = %p \n", hw_addr);
3444 device_printf(dev,"CTRL = 0x%x RXCTRL = 0x%x \n",
3445 IXGBE_READ_REG(hw, IXGBE_TXDCTL(0)),
3446 IXGBE_READ_REG(hw, IXGBE_RXCTRL));
3447 device_printf(dev,"RXDCTL(0) = 0x%x RXDCTL(1) = 0x%x"
3448 " RXDCTL(2) = 0x%x \n",
3449 IXGBE_READ_REG(hw, IXGBE_RXDCTL(0)),
3450 IXGBE_READ_REG(hw, IXGBE_RXDCTL(1)),
3451 IXGBE_READ_REG(hw, IXGBE_RXDCTL(2)));
3452 device_printf(dev,"SRRCTL(0) = 0x%x SRRCTL(1) = 0x%x"
3453 " SRRCTL(2) = 0x%x \n",
3454 IXGBE_READ_REG(hw, IXGBE_SRRCTL(0)),
3455 IXGBE_READ_REG(hw, IXGBE_SRRCTL(1)),
3456 IXGBE_READ_REG(hw, IXGBE_SRRCTL(2)));
3457 device_printf(dev,"EIMC = 0x%x EIMS = 0x%x\n",
3458 IXGBE_READ_REG(hw, IXGBE_EIMC),
3459 IXGBE_READ_REG(hw, IXGBE_EIMS));
3460 device_printf(dev,"Queue(0) tdh = %d, hw tdt = %d\n",
3461 IXGBE_READ_REG(hw, IXGBE_TDH(0)),
3462 IXGBE_READ_REG(hw, IXGBE_TDT(0)));
3463 device_printf(dev,"Error Byte Count = %u \n",
3464 IXGBE_READ_REG(hw, IXGBE_ERRBC));
3466 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3467 device_printf(dev,"Queue %d Packets Received: %lu\n",
3468 rxr->me, (long)rxr->packet_count);
3471 rxr = adapter->rx_rings; /* Reset */
3472 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3473 device_printf(dev,"Queue %d Bytes Received: %lu\n",
3474 rxr->me, (long)rxr->byte_count);
3477 for (int i = 0; i < adapter->num_rx_queues; i++) {
3478 device_printf(dev,"Queue[%d]: rdh = %d, hw rdt = %d\n",
3479 i, IXGBE_READ_REG(hw, IXGBE_RDH(i)),
3480 IXGBE_READ_REG(hw, IXGBE_RDT(i)));
3487 ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS)
3491 struct adapter *adapter;
3494 error = sysctl_handle_int(oidp, &result, 0, req);
3496 if (error || !req->newptr)
3497 return (error);
3500 adapter = (struct adapter *) arg1;
3501 ixgbe_print_hw_stats(adapter);
3507 ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS)
3510 struct adapter *adapter;
3513 error = sysctl_handle_int(oidp, &result, 0, req);
3515 if (error || !req->newptr)
3516 return (error);
3519 adapter = (struct adapter *) arg1;
3520 ixgbe_print_debug_info(adapter);
3525 /*
3526 ** Set flow control using sysctl:
3527 ** Flow control values:
3528 **   0 - off
3529 **   1 - rx pause
3530 **   2 - tx pause
3531 **   3 - full
3532 */
3534 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
3537 struct adapter *adapter;
3539 error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);
3540 if (error)
3541 return (error);
3544 adapter = (struct adapter *) arg1;
3545 switch (ixgbe_flow_control) {
3546 case ixgbe_fc_rx_pause:
3547 case ixgbe_fc_tx_pause:
3548 case ixgbe_fc_full:
3549 adapter->hw.fc.original_type = ixgbe_flow_control;
3550 break;
3551 default:
3553 adapter->hw.fc.original_type = ixgbe_fc_none;
3556 ixgbe_setup_fc(&adapter->hw, 0);
3561 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
3562 const char *description, int *limit, int value)
3565 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3566 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3567 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
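/*
 * Hypothetical usage sketch (editor's addition, not taken from the
 * original code): the attach path could register the limit consumed by
 * ixgbe_rxeof() with something like
 *
 *     ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
 *         "max number of rx packets to process per interrupt",
 *         &adapter->rx_process_limit, 100);
 *
 * The sysctl name, description string and the rx_process_limit field
 * are assumptions used for illustration only.
 */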
3570 #ifndef NO_82598_A0_SUPPORT
3571 /*
3572 * A0 Workaround: invert descriptor for hardware
3573 */
3574 static void
3575 desc_flip(void *desc)
3576 {
3577 struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
3578 struct dhack *d;
3580 d = (struct dhack *)desc;
3581 d->a1 = ~(d->a1);
3582 d->a2 = ~(d->a2);
3583 d->b1 = ~(d->b1);
3584 d->b2 = ~(d->b2);
3585 d->b2 &= 0xFFFFFFF0;
3586 d->b1 &= ~IXGBE_ADVTXD_DCMD_RS;
3588 return;
3589 }
3590 #endif /* NO_82598_A0_SUPPORT */