1 /******************************************************************************
3 Copyright (c) 2001-2008, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
/* Pull in kernel configuration option headers (e.g. DEVICE_POLLING). */
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
/*
** IXGBE_VLAN_EVENTS: when defined, the driver registers vlan_config /
** vlan_unconfig event handlers (ixgbe_register_vlan / ixgbe_unregister_vlan)
** instead of programming VLAN tagging unconditionally in init.
*/
39 /* Undefine this if not using CURRENT */
40 #define IXGBE_VLAN_EVENTS
44 /*********************************************************************
45 *  Set this to one to display debug statistics
46 *********************************************************************/
/* Runtime-togglable flag; checked by the stats sysctl/print paths. */
47 int             ixgbe_display_debug_stats = 0;
49 /*********************************************************************
*  Driver version string, appended to the device description in probe
51 *********************************************************************/
52 char ixgbe_driver_version[] = "1.4.7";
54 /*********************************************************************
*  PCI Device ID Table
57 *  Used by probe to select devices to load on
58 *  Last field stores an index into ixgbe_strings
59 *  Last entry must be all 0s
61 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
/*
** Supported 82598 variants.  A subvendor/subdevice of 0 acts as a
** wildcard in the probe match loop.  NOTE(review): the terminating
** all-zero sentinel entry is not visible in this view — confirm it
** follows the "required last entry" comment below.
*/
64 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
67 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
68 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT_DUAL_PORT, 0, 0, 0},
69 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
70 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
71 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
72 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
73 	/* required last entry */
77 /*********************************************************************
78 *  Table of branding strings
79 *********************************************************************/
/* Indexed by the last field of ixgbe_vendor_info_array entries. */
81 static char    *ixgbe_strings[] = {
82 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
85 /*********************************************************************
*  Function prototypes for all driver-local (static) routines
87 *********************************************************************/
/* FreeBSD device interface entry points */
88 static int      ixgbe_probe(device_t);
89 static int      ixgbe_attach(device_t);
90 static int      ixgbe_detach(device_t);
91 static int      ixgbe_shutdown(device_t);
/* ifnet entry points and core control paths */
92 static void     ixgbe_start(struct ifnet *);
93 static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
94 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
95 static void     ixgbe_watchdog(struct adapter *);
96 static void     ixgbe_init(void *);
97 static void     ixgbe_init_locked(struct adapter *);
98 static void     ixgbe_stop(void *);
99 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
100 static int      ixgbe_media_change(struct ifnet *);
/* Resource allocation / hardware bring-up */
101 static void     ixgbe_identify_hardware(struct adapter *);
102 static int      ixgbe_allocate_pci_resources(struct adapter *);
103 static int      ixgbe_allocate_msix(struct adapter *);
104 static int      ixgbe_allocate_legacy(struct adapter *);
105 static int      ixgbe_allocate_queues(struct adapter *);
106 static int      ixgbe_setup_msix(struct adapter *);
107 static void     ixgbe_free_pci_resources(struct adapter *);
108 static void     ixgbe_local_timer(void *);
109 static int      ixgbe_hardware_init(struct adapter *);
110 static void     ixgbe_setup_interface(device_t, struct adapter *);
/* Transmit ring setup/teardown */
112 static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
113 static int      ixgbe_setup_transmit_structures(struct adapter *);
114 static void     ixgbe_setup_transmit_ring(struct tx_ring *);
115 static void     ixgbe_initialize_transmit_units(struct adapter *);
116 static void     ixgbe_free_transmit_structures(struct adapter *);
117 static void     ixgbe_free_transmit_buffers(struct tx_ring *);
/* Receive ring setup/teardown */
119 static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
120 static int      ixgbe_setup_receive_structures(struct adapter *);
121 static int      ixgbe_setup_receive_ring(struct rx_ring *);
122 static void     ixgbe_initialize_receive_units(struct adapter *);
123 static void     ixgbe_free_receive_structures(struct adapter *);
124 static void     ixgbe_free_receive_buffers(struct rx_ring *);
/* Interrupt control, datapath workers, and misc helpers */
126 static void     ixgbe_enable_intr(struct adapter *);
127 static void     ixgbe_disable_intr(struct adapter *);
128 static void     ixgbe_update_stats_counters(struct adapter *);
129 static bool     ixgbe_txeof(struct tx_ring *);
130 static bool     ixgbe_rxeof(struct rx_ring *, int);
131 static void     ixgbe_rx_checksum(struct adapter *, u32, struct mbuf *);
132 static void     ixgbe_set_promisc(struct adapter *);
133 static void     ixgbe_disable_promisc(struct adapter *);
134 static void     ixgbe_set_multi(struct adapter *);
135 static void     ixgbe_print_hw_stats(struct adapter *);
136 static void     ixgbe_print_debug_info(struct adapter *);
137 static void     ixgbe_update_link_status(struct adapter *);
138 static int      ixgbe_get_buf(struct rx_ring *, int);
139 static int      ixgbe_xmit(struct tx_ring *, struct mbuf **);
140 static int      ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS);
141 static int      ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS);
142 static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
143 static int      ixgbe_dma_malloc(struct adapter *, bus_size_t,
144 		    struct ixgbe_dma_alloc *, int);
145 static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
146 static void	ixgbe_add_rx_process_limit(struct adapter *, const char *,
147 		    const char *, int *, int);
148 static boolean_t ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
149 static boolean_t ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
150 static void	ixgbe_set_ivar(struct adapter *, u16, u8);
151 static void	ixgbe_configure_ivars(struct adapter *);
152 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
154 #ifdef IXGBE_VLAN_EVENTS
155 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
156 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
159 /* Legacy (single vector) interrupt handler */
160 static void	ixgbe_legacy_irq(void *);
162 /* The MSI/X Interrupt handlers */
163 static void	ixgbe_msix_tx(void *);
164 static void	ixgbe_msix_rx(void *);
165 static void	ixgbe_msix_link(void *);
167 /* Legacy interrupts use deferred handlers */
168 static void	ixgbe_handle_tx(void *context, int pending);
169 static void	ixgbe_handle_rx(void *context, int pending);
/* 82598 A0 stepping needs its descriptors byte-flipped; see desc_flip() */
171 #ifndef NO_82598_A0_SUPPORT
172 static void	desc_flip(void *);
175 /*********************************************************************
176 *  FreeBSD Device Interface Entry Points
177 *********************************************************************/
/* newbus method table wiring probe/attach/detach/shutdown to this driver */
179 static device_method_t ixgbe_methods[] = {
180 	/* Device interface */
181 	DEVMETHOD(device_probe, ixgbe_probe),
182 	DEVMETHOD(device_attach, ixgbe_attach),
183 	DEVMETHOD(device_detach, ixgbe_detach),
184 	DEVMETHOD(device_shutdown, ixgbe_shutdown),
/* Driver description: unit name "ix", softc is one struct adapter */
188 static driver_t ixgbe_driver = {
189 	"ix", ixgbe_methods, sizeof(struct adapter),
192 static devclass_t ixgbe_devclass;
/* Register on the PCI bus; depends on pci and ether modules */
193 DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
195 MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
196 MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
199 ** TUNEABLE PARAMETERS:
202 /* How many packets rxeof tries to clean at a time */
203 static int ixgbe_rx_process_limit = 100;
204 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
206 /* Flow control setting, default to full */
207 static int ixgbe_flow_control = 3;
208 TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
211 ** Should the driver do LRO on the RX end
212 **  this can be toggled on the fly, but the
213 **  interface must be reset (down/up) for it
** to take effect on existing rings
216 static int ixgbe_enable_lro = 0;
217 TUNABLE_INT("hw.ixgbe.enable_lro", &ixgbe_enable_lro);
220 ** MSIX should be the default for best performance,
221 ** but this allows it to be forced off for testing.
223 static int ixgbe_enable_msix = 1;
224 TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
227 ** Number of TX/RX Queues, with 0 setting
228 ** it autoconfigures to the number of cpus.
230 static int ixgbe_tx_queues = 1;
231 TUNABLE_INT("hw.ixgbe.tx_queues", &ixgbe_tx_queues);
232 static int ixgbe_rx_queues = 4;
233 TUNABLE_INT("hw.ixgbe.rx_queues", &ixgbe_rx_queues);
235 /* Number of TX descriptors per ring */
236 static int ixgbe_txd = DEFAULT_TXD;
237 TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
239 /* Number of RX descriptors per ring */
240 static int ixgbe_rxd = DEFAULT_RXD;
241 TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
243 /* Total number of Interfaces - need for config sanity check */
244 static int ixgbe_total_ports;
/* Optics type (IFM_10G_SR/LR/CX4) of this interface, set during probe */
246 /* Optics type of this interface */
247 static int ixgbe_optics;
249 /*********************************************************************
250 *  Device identification routine
252 *  ixgbe_probe determines if the driver should be loaded on
253 *  adapter based on PCI vendor/device id of the adapter.
255 *  return 0 on success, positive on failure
256 *********************************************************************/
259 ixgbe_probe(device_t dev)
261 	ixgbe_vendor_info_t *ent;
263 	u_int16_t       pci_vendor_id = 0;
264 	u_int16_t       pci_device_id = 0;
265 	u_int16_t       pci_subvendor_id = 0;
266 	u_int16_t       pci_subdevice_id = 0;
267 	char            adapter_name[128];
269 	INIT_DEBUGOUT("ixgbe_probe: begin");
	/* Fast reject: not an Intel device */
271 	pci_vendor_id = pci_get_vendor(dev);
272 	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
275 	pci_device_id = pci_get_device(dev);
276 	pci_subvendor_id = pci_get_subvendor(dev);
277 	pci_subdevice_id = pci_get_subdevice(dev);
	/*
	** Walk the vendor table until the all-zero sentinel; a zero
	** subvendor/subdevice in the table matches anything.
	*/
279 	ent = ixgbe_vendor_info_array;
280 	while (ent->vendor_id != 0) {
281 		if ((pci_vendor_id == ent->vendor_id) &&
282 		    (pci_device_id == ent->device_id) &&
284 		    ((pci_subvendor_id == ent->subvendor_id) ||
285 		     (ent->subvendor_id == 0)) &&
287 		    ((pci_subdevice_id == ent->subdevice_id) ||
288 		     (ent->subdevice_id == 0))) {
289 			sprintf(adapter_name, "%s, Version - %s",
290 				ixgbe_strings[ent->index],
291 				ixgbe_driver_version);
			/*
			** Record optics type and bump the global port count
			** used later for the mbuf-cluster sanity check.
			** NOTE(review): per-case break statements are not
			** visible in this view — confirm each case exits
			** the switch rather than falling through.
			*/
292 			switch (pci_device_id) {
293 				case IXGBE_DEV_ID_82598AT_DUAL_PORT :
294 					ixgbe_total_ports += 2;
296 				case IXGBE_DEV_ID_82598_CX4_DUAL_PORT :
297 					ixgbe_optics = IFM_10G_CX4;
298 					ixgbe_total_ports += 2;
300 				case IXGBE_DEV_ID_82598AF_DUAL_PORT :
301 					ixgbe_optics = IFM_10G_SR;
302 					ixgbe_total_ports += 2;
304 				case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
305 					ixgbe_optics = IFM_10G_SR;
306 					ixgbe_total_ports += 1;
308 				case IXGBE_DEV_ID_82598EB_XF_LR :
309 					ixgbe_optics = IFM_10G_LR;
310 					ixgbe_total_ports += 1;
312 				case IXGBE_DEV_ID_82598EB_CX4 :
313 					ixgbe_optics = IFM_10G_CX4;
314 					ixgbe_total_ports += 1;
316 				case IXGBE_DEV_ID_82598AT :
317 					ixgbe_total_ports += 1;
321 			device_set_desc_copy(dev, adapter_name);
330 /*********************************************************************
331 *  Device initialization routine
333 *  The attach entry point is called when the driver is being loaded.
334 *  This routine identifies the type of hardware, allocates all resources
335 *  and initializes the hardware.
337 *  return 0 on success, positive on failure
338 *********************************************************************/
341 ixgbe_attach(device_t dev)
343 	struct adapter *adapter;
347 	INIT_DEBUGOUT("ixgbe_attach: begin");
349 	/* Allocate, clear, and link in our adapter structure */
350 	adapter = device_get_softc(dev);
351 	adapter->dev = adapter->osdep.dev = dev;
	/* Core lock protects init/stop/ioctl paths */
354 	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
	/* Per-device sysctl nodes: stats, debug dump, flow control, LRO */
357 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
358 			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
359 			OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
360 			adapter, 0, ixgbe_sysctl_stats, "I", "Statistics");
362 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
363 			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
364 			OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
365 			adapter, 0, ixgbe_sysctl_debug, "I", "Debug Info");
367 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
368 			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
369 			OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
370 			adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
372 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
373 			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
374 			OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
375 			&ixgbe_enable_lro, 1, "Large Receive Offload");
377 	/* Set up the timer callout */
378 	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
380 	/* Determine hardware revision */
381 	ixgbe_identify_hardware(adapter);
383 	/* Indicate to RX setup to use Jumbo Clusters */
384 	adapter->bigbufs = TRUE;
386 	/* Do base PCI setup - map BAR0 */
387 	if (ixgbe_allocate_pci_resources(adapter)) {
388 		device_printf(dev, "Allocation of PCI resources failed\n");
393 	/* Do descriptor calc and sanity checks */
394 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
395 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
396 		device_printf(dev, "TXD config issue, using default!\n");
397 		adapter->num_tx_desc = DEFAULT_TXD;
399 		adapter->num_tx_desc = ixgbe_txd;
402 	** With many RX rings it is easy to exceed the
403 	** system mbuf allocation. Tuning nmbclusters
404 	** can alleviate this.
406 	if ((adapter->num_rx_queues > 1) && (nmbclusters > 0 )){
408 		/* Calculate the total RX mbuf needs */
409 		s = (ixgbe_rxd * adapter->num_rx_queues) * ixgbe_total_ports;
410 		if (s > nmbclusters) {
411 			device_printf(dev, "RX Descriptors exceed "
412 			    "system mbuf max, using default instead!\n");
413 			ixgbe_rxd = DEFAULT_RXD;
	/*
	** NOTE(review): this RX-descriptor sanity check bounds ixgbe_rxd
	** against MIN_TXD/MAX_TXD; it should almost certainly use
	** MIN_RXD/MAX_RXD — confirm against the header definitions.
	*/
417 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
418 	    ixgbe_rxd < MIN_TXD || ixgbe_rxd > MAX_TXD) {
419 		device_printf(dev, "RXD config issue, using default!\n");
420 		adapter->num_rx_desc = DEFAULT_RXD;
422 		adapter->num_rx_desc = ixgbe_rxd;
424 	/* Allocate our TX/RX Queues */
425 	if (ixgbe_allocate_queues(adapter)) {
430 	/* Initialize the shared code */
431 	if (ixgbe_init_shared_code(&adapter->hw)) {
432 		device_printf(dev,"Unable to initialize the shared code\n");
437 	/* Initialize the hardware */
438 	if (ixgbe_hardware_init(adapter)) {
439 		device_printf(dev,"Unable to initialize the hardware\n");
	/* Choose interrupt strategy: MSI-X if available and not disabled */
444 	if ((adapter->msix > 1) && (ixgbe_enable_msix))
445 		error = ixgbe_allocate_msix(adapter);
447 		error = ixgbe_allocate_legacy(adapter);
451 	/* Setup OS specific network interface */
452 	ixgbe_setup_interface(dev, adapter);
454 	/* Sysctl for limiting the amount of work done in the taskqueue */
455 	ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
456 	    "max number of rx packets to process", &adapter->rx_process_limit,
457 	    ixgbe_rx_process_limit);
459 	/* Initialize statistics */
460 	ixgbe_update_stats_counters(adapter);
462 #ifdef IXGBE_VLAN_EVENTS
463 	/* Register for VLAN events */
464 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
465 	    ixgbe_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
466 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
467 	    ixgbe_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
470 	/* let hardware know driver is loaded */
471 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
472 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
473 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
475 	INIT_DEBUGOUT("ixgbe_attach: end");
	/* Error unwind: release rings then PCI resources */
478 	ixgbe_free_transmit_structures(adapter);
479 	ixgbe_free_receive_structures(adapter);
481 	ixgbe_free_pci_resources(adapter);
486 /*********************************************************************
487 *  Device removal routine
489 *  The detach entry point is called when the driver is being removed.
490 *  This routine stops the adapter and deallocates all the resources
491 *  that were allocated for driver operation.
493 *  return 0 on success, positive on failure
494 *********************************************************************/
497 ixgbe_detach(device_t dev)
499 	struct adapter *adapter = device_get_softc(dev);
500 	struct tx_ring *txr = adapter->tx_rings;
501 	struct rx_ring *rxr = adapter->rx_rings;
504 	INIT_DEBUGOUT("ixgbe_detach: begin");
506 	/* Make sure VLANS are not using driver */
	/* if_vlantrunk (7.x+) vs if_nvlans (older) — refuse detach if in use */
507 #if __FreeBSD_version >= 700000
508 	if (adapter->ifp->if_vlantrunk != NULL) {
510 	if (adapter->ifp->if_nvlans != 0) {
512 		device_printf(dev,"Vlan in use, detach first\n");
	/* Stop the adapter under the core lock */
516 	IXGBE_CORE_LOCK(adapter);
518 	IXGBE_CORE_UNLOCK(adapter);
	/* Drain and free the per-ring deferred taskqueues */
520 	for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
522 			taskqueue_drain(txr->tq, &txr->tx_task);
523 			taskqueue_free(txr->tq);
528 	for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
530 			taskqueue_drain(rxr->tq, &rxr->rx_task);
531 			taskqueue_free(rxr->tq);
536 #ifdef IXGBE_VLAN_EVENTS
537 	/* Unregister VLAN events */
538 	if (adapter->vlan_attach != NULL)
539 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
540 	if (adapter->vlan_detach != NULL)
541 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
544 	/* let hardware know driver is unloading */
545 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
546 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
547 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
549 	ether_ifdetach(adapter->ifp);
550 	callout_drain(&adapter->timer);
551 	ixgbe_free_pci_resources(adapter);
552 	bus_generic_detach(dev);
553 	if_free(adapter->ifp);
555 	ixgbe_free_transmit_structures(adapter);
556 	ixgbe_free_receive_structures(adapter);
558 	IXGBE_CORE_LOCK_DESTROY(adapter);
562 /*********************************************************************
564 *  Shutdown entry point
*  Stops the adapter under the core lock on system shutdown.
566 **********************************************************************/
569 ixgbe_shutdown(device_t dev)
571 	struct adapter *adapter = device_get_softc(dev);
572 	IXGBE_CORE_LOCK(adapter);
574 	IXGBE_CORE_UNLOCK(adapter);
579 /*********************************************************************
580 *  Transmit entry point
582 *  ixgbe_start is called by the stack to initiate a transmit.
583 *  The driver will remain in this routine as long as there are
584 *  packets to transmit and transmit resources are available.
585 *  In case resources are not available stack is notified and
586 *  the packet is requeued.
587 **********************************************************************/
590 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
593 	struct adapter *adapter = txr->adapter;
	/* Caller must hold the TX ring lock */
595 	IXGBE_TX_LOCK_ASSERT(txr);
	/* Bail unless the interface is up and not already flow-blocked */
597 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
600 	if (!adapter->link_active)
	/* Dequeue and transmit until the send queue empties or we stall */
603 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
605 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		/* On xmit failure: mark ring busy and requeue the packet */
609 		if (ixgbe_xmit(txr, &m_head)) {
612 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
613 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
616 		/* Send a copy of the frame to the BPF listener */
617 		ETHER_BPF_MTAP(ifp, m_head);
619 		/* Set timeout in case hardware has problems transmitting */
620 		txr->watchdog_timer = IXGBE_TX_TIMEOUT;
/*
** ifnet if_start hook: pick a TX ring (round-robin by current CPU when
** multiqueue), take its lock, and hand off to ixgbe_start_locked().
*/
628 ixgbe_start(struct ifnet *ifp)
630 	struct adapter *adapter = ifp->if_softc;
631 	struct tx_ring *txr = adapter->tx_rings;
635 	** This is really just here for testing
636 	** TX multiqueue, ultimately what is
637 	** needed is the flow support in the stack
638 	** and appropriate logic here to deal with
	** per-flow queue assignment
641 	if (adapter->num_tx_queues > 1)
642 		queue = (curcpu % adapter->num_tx_queues);
644 	txr = &adapter->tx_rings[queue];
646 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
648 		ixgbe_start_locked(txr, ifp);
649 	IXGBE_TX_UNLOCK(txr);
654 /*********************************************************************
*  Ioctl entry point
657 *  ixgbe_ioctl is called when the user wants to configure the
*  interface (address, MTU, flags, multicast, media, capabilities)
660 *  return 0 on success, positive on failure
661 **********************************************************************/
664 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
667 	struct ifreq   *ifr = (struct ifreq *) data;
668 	struct ifaddr  *ifa = (struct ifaddr *) data;
669 	struct adapter *adapter = ifp->if_softc;
	/*
	** NOTE(review): the switch/case labels for each command are not
	** visible in this view; the debug strings below identify the
	** intended cases (SIOCxIFADDR, SIOCSIFMTU, SIOCSIFFLAGS, ...).
	*/
	/* SIOCxIFADDR: bring interface up for AF_INET, then ARP init */
673 		IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
674 		if (ifa->ifa_addr->sa_family == AF_INET) {
675 			ifp->if_flags |= IFF_UP;
676 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
677 				IXGBE_CORE_LOCK(adapter);
678 				ixgbe_init_locked(adapter);
679 				IXGBE_CORE_UNLOCK(adapter);
681 			arp_ifinit(ifp, ifa);
683 			ether_ioctl(ifp, command, data);
	/* SIOCSIFMTU: validate, recompute max frame size, reinit */
686 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
687 		if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
690 			IXGBE_CORE_LOCK(adapter);
691 			ifp->if_mtu = ifr->ifr_mtu;
692 			adapter->max_frame_size =
693 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
694 			ixgbe_init_locked(adapter);
695 			IXGBE_CORE_UNLOCK(adapter);
	/* SIOCSIFFLAGS: handle up/down and promisc/allmulti transitions */
699 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
700 		IXGBE_CORE_LOCK(adapter);
701 		if (ifp->if_flags & IFF_UP) {
702 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
703 				if ((ifp->if_flags ^ adapter->if_flags) &
704 				    (IFF_PROMISC | IFF_ALLMULTI)) {
705 					ixgbe_disable_promisc(adapter);
706 					ixgbe_set_promisc(adapter);
709 				ixgbe_init_locked(adapter);
711 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
713 		adapter->if_flags = ifp->if_flags;
714 		IXGBE_CORE_UNLOCK(adapter);
	/* SIOC(ADD|DEL)MULTI: reload multicast filter with intrs masked */
718 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
719 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
720 			IXGBE_CORE_LOCK(adapter);
721 			ixgbe_disable_intr(adapter);
722 			ixgbe_set_multi(adapter);
723 			ixgbe_enable_intr(adapter);
724 			IXGBE_CORE_UNLOCK(adapter);
	/* SIOCxIFMEDIA: delegate to ifmedia */
729 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
730 		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	/* SIOCSIFCAP: toggle checksum/TSO/VLAN offload capabilities */
734 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
735 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
736 		if (mask & IFCAP_HWCSUM)
737 			ifp->if_capenable ^= IFCAP_HWCSUM;
738 		if (mask & IFCAP_TSO4)
739 			ifp->if_capenable ^= IFCAP_TSO4;
740 		if (mask & IFCAP_VLAN_HWTAGGING)
741 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
742 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
744 #if __FreeBSD_version >= 700000
745 		VLAN_CAPABILITIES(ifp);
	/* default: hand anything else to ether_ioctl */
750 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
751 		error = ether_ioctl(ifp, command, data);
758 /*********************************************************************
759 *  Watchdog entry point
761 *  This routine is called by the local timer
762 *  to detect hardware hangs .
764 **********************************************************************/
767 ixgbe_watchdog(struct adapter *adapter)
769 	device_t 	dev = adapter->dev;
770 	struct tx_ring *txr = adapter->tx_rings;
771 	struct ixgbe_hw *hw = &adapter->hw;
772 	bool		tx_hang = FALSE;
	/* Must be called with the core lock held (from the timer) */
774 	IXGBE_CORE_LOCK_ASSERT(adapter);
777 	* The timer is set to 5 every time ixgbe_start() queues a packet.
778 	* Then ixgbe_txeof() keeps resetting to 5 as long as it cleans at
779 	* least one descriptor.
780 	* Finally, anytime all descriptors are clean the timer is
	* set to 0, so the ring is only considered hung when the
	* countdown expires with work still outstanding.
783 	for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
		/* Timer idle (0) or still counting down: ring is healthy */
787 		if (txr->watchdog_timer == 0 || --txr->watchdog_timer) {
788 			IXGBE_TX_UNLOCK(txr);
		/* head==tail means HW caught up after all — not hung */
791 		head = IXGBE_READ_REG(hw, IXGBE_TDH(i));
792 		tail = IXGBE_READ_REG(hw, IXGBE_TDT(i));
793 		if (head == tail) { /* last minute check */
794 			IXGBE_TX_UNLOCK(txr);
797 		/* Well, seems something is really hung */
799 		IXGBE_TX_UNLOCK(txr);
803 	if (tx_hang == FALSE)
807 	 * If we are in this routine because of pause frames, then don't
808 	 * reset the hardware.
810 	if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
811 		txr = adapter->tx_rings;	/* reset pointer */
812 		for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
814 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
815 			IXGBE_TX_UNLOCK(txr);
	/* Genuine hang: dump ring state, mark down, and reinit */
821 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
822 	for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
823 		device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
824 		    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
825 		    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
826 		device_printf(dev,"TX(%d) desc avail = %d,"
827 		    "Next TX to Clean = %d\n",
828 		    i, txr->tx_avail, txr->next_tx_to_clean);
830 	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
831 	adapter->watchdog_events++;
833 	ixgbe_init_locked(adapter);
836 /*********************************************************************
*  Init entry point
839 *  This routine is used in two ways. It is used by the stack as
840 *  init entry point in network interface structure. It is also used
841 *  by the driver as a hw/sw initialization routine to get to a
*  consistent state after a reset
844 *  return 0 on success, positive on failure
845 **********************************************************************/
846 #define IXGBE_MHADD_MFS_SHIFT 16
849 ixgbe_init_locked(struct adapter *adapter)
851 	struct ifnet   *ifp = adapter->ifp;
852 	device_t 	dev = adapter->dev;
854 	u32		txdctl, rxdctl, mhadd, gpie;
856 	INIT_DEBUGOUT("ixgbe_init: begin");
	/* Core lock must be held by the caller */
859 	mtx_assert(&adapter->core_mtx, MA_OWNED);
863 	/* Get the latest mac address, User can use a LAA */
864 	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
865 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
866 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, 1);
867 	adapter->hw.addr_ctrl.rar_used_count = 1;
869 	/* Initialize the hardware */
870 	if (ixgbe_hardware_init(adapter)) {
871 		device_printf(dev, "Unable to initialize the hardware\n");
	/* Without VLAN events, hardware VLAN tag stripping is set up here */
875 #ifndef IXGBE_VLAN_EVENTS
876 	/* With events this is done when a vlan registers */
877 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
879 		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
880 		ctrl |= IXGBE_VLNCTRL_VME;
881 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
882 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
886 	/* Prepare transmit descriptors and buffers */
887 	if (ixgbe_setup_transmit_structures(adapter)) {
888 		device_printf(dev,"Could not setup transmit structures\n");
893 	ixgbe_initialize_transmit_units(adapter);
895 	/* Setup Multicast table */
896 	ixgbe_set_multi(adapter);
899 	** If we are resetting MTU smaller than 2K
900 	** drop to small RX buffers
902 	if (adapter->max_frame_size <= MCLBYTES)
903 		adapter->bigbufs = FALSE;
905 	/* Prepare receive descriptors and buffers */
906 	if (ixgbe_setup_receive_structures(adapter)) {
907 		device_printf(dev,"Could not setup receive structures\n");
912 	/* Configure RX settings */
913 	ixgbe_initialize_receive_units(adapter);
915 	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
916 	/* Enable Fan Failure Interrupt */
917 	if (adapter->hw.phy.media_type == ixgbe_media_type_copper)
918 		gpie |= IXGBE_SDP1_GPIEN;
920 		/* Enable Enhanced MSIX mode */
921 		gpie |= IXGBE_GPIE_MSIX_MODE;
922 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
925 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
927 	/* Set the various hardware offload abilities */
928 	ifp->if_hwassist = 0;
929 	if (ifp->if_capenable & IFCAP_TSO4)
930 		ifp->if_hwassist |= CSUM_TSO;
931 	else if (ifp->if_capenable & IFCAP_TXCSUM)
932 		ifp->if_hwassist = (CSUM_TCP | CSUM_UDP);
	/* Program max frame size (MHADD.MFS) for jumbo frames */
935 	if (ifp->if_mtu > ETHERMTU) {
936 		mhadd = IXGBE_READ_REG(&adapter->hw, IXGBE_MHADD);
937 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
938 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
939 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MHADD, mhadd);
942 	/* Now enable all the queues */
944 	for (int i = 0; i < adapter->num_tx_queues; i++) {
945 		txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
946 		txdctl |= IXGBE_TXDCTL_ENABLE;
947 		/* Set WTHRESH to 8, burst writeback */
949 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
952 	for (int i = 0; i < adapter->num_rx_queues; i++) {
953 		rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
954 		/* PTHRESH set to 32 */
956 		rxdctl |= IXGBE_RXDCTL_ENABLE;
957 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
	/* Kick off the once-per-second local timer */
960 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
962 	/* Set up MSI/X routing */
963 	ixgbe_configure_ivars(adapter);
965 	ixgbe_enable_intr(adapter);
967 	/* Now inform the stack we're ready */
968 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
969 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* if_init hook: acquire the core lock and run the locked init path */
975 ixgbe_init(void *arg)
977 	struct adapter *adapter = arg;
979 	IXGBE_CORE_LOCK(adapter);
980 	ixgbe_init_locked(adapter);
981 	IXGBE_CORE_UNLOCK(adapter);
987 ** Legacy Deferred Interrupt Handlers
/*
** Taskqueue handler for deferred RX cleanup: repeatedly run rxeof
** (bounded by MAX_INTR passes) until it reports no more work.
*/
991 ixgbe_handle_rx(void *context, int pending)
993 	struct rx_ring  *rxr = context;
994 	struct adapter  *adapter = rxr->adapter;
997 	while (loop++ < MAX_INTR)
998 		if (ixgbe_rxeof(rxr, adapter->rx_process_limit) == 0)
/*
** Taskqueue handler for deferred TX cleanup: reap completed
** descriptors (bounded by MAX_INTR passes), then restart
** transmission if the stack has queued packets.
*/
1003 ixgbe_handle_tx(void *context, int pending)
1005 	struct tx_ring  *txr = context;
1006 	struct adapter  *adapter = txr->adapter;
1007 	struct ifnet    *ifp = adapter->ifp;
1011 	while (loop++ < MAX_INTR)
1012 		if (ixgbe_txeof(txr) == 0)
1014 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1015 		ixgbe_start_locked(txr, ifp);
1016 	IXGBE_TX_UNLOCK(txr);
1020 /*********************************************************************
1022 *  Legacy Interrupt Service routine
1024 **********************************************************************/
1027 ixgbe_legacy_irq(void *arg)
1030 	struct adapter *adapter = arg;
1031 	struct tx_ring *txr = adapter->tx_rings;
1032 	struct rx_ring *rxr = adapter->rx_rings;
1033 	struct ixgbe_hw *hw;
	/* Reading EICR acknowledges/clears the interrupt causes */
1036 	reg_eicr = IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
	/* Defer any remaining RX/TX work to the taskqueues */
1040 	if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0)
1041 		taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1042 	if (ixgbe_txeof(txr) != 0)
1043 		taskqueue_enqueue(txr->tq, &txr->tx_task);
1045 	/* Check for fan failure */
	/*
	** NOTE(review): no assignment of hw (e.g. hw = &adapter->hw)
	** is visible before this dereference — confirm it is
	** initialized earlier in the function.
	*/
1046 	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1047 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1048 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1049 		    "REPLACE IMMEDIATELY!!\n");
1050 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
1051 		    IXGBE_EICR_GPI_SDP1);
1053 	/* Link status change */
1054 	if (reg_eicr & IXGBE_EICR_LSC)
1055 		ixgbe_update_link_status(adapter);
1061 /*********************************************************************
1063 *  MSI TX Interrupt Service routine
1065 **********************************************************************/
1068 ixgbe_msix_tx(void *arg)
1070 	struct tx_ring	*txr = arg;
1071 	struct adapter  *adapter = txr->adapter;
	/* Reap completed TX descriptors, bounded by MAX_INTR passes */
1076 	while (loop++ < MAX_INTR)
1077 		if (ixgbe_txeof(txr) == 0)
1079 	IXGBE_TX_UNLOCK(txr);
1080 	/* Reenable this interrupt */
1081 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
1086 /*********************************************************************
1088 *  MSI RX Interrupt Service routine
1090 **********************************************************************/
1093 ixgbe_msix_rx(void *arg)
1095 	struct rx_ring	*rxr = arg;
1096 	struct adapter  *adapter = rxr->adapter;
	/* Clean the ring up to rx_process_limit, bounded by MAX_INTR passes */
1100 	while (loop++ < MAX_INTR)
1101 		if (ixgbe_rxeof(rxr, adapter->rx_process_limit) == 0)
1103 	/* Reenable this interrupt */
1104 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
/*
** MSI-X link/other-cause vector: handles link state changes and
** fan-failure events, then re-arms the OTHER interrupt cause.
*/
1109 ixgbe_msix_link(void *arg)
1111 	struct adapter	*adapter = arg;
1112 	struct ixgbe_hw *hw = &adapter->hw;
1115 	++adapter->link_irq;
	/* Reading EICR acknowledges/clears the pending causes */
1117 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1119 	if (reg_eicr & IXGBE_EICR_LSC)
1120 		ixgbe_update_link_status(adapter);
1122 	/* Check for fan failure */
1123 	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1124 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1125 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1126 		    "REPLACE IMMEDIATELY!!\n");
1127 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
	/* Re-enable the link/other cause */
1130 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1135 /*********************************************************************
1137 * Media Ioctl callback
1139 * This routine is called whenever the user queries the status of
1140 * the interface using ifconfig.
1142 **********************************************************************/
/*
 * ifmedia status callback: refresh link state under the core lock and
 * report active/valid flags plus the current media subtype to ifconfig.
 */
1144 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1146 struct adapter *adapter = ifp->if_softc;
1148 INIT_DEBUGOUT("ixgbe_media_status: begin");
1149 IXGBE_CORE_LOCK(adapter);
1150 ixgbe_update_link_status(adapter);
1152 ifmr->ifm_status = IFM_AVALID;
1153 ifmr->ifm_active = IFM_ETHER;
/* No link: report only "valid" status and bail out early. */
1155 if (!adapter->link_active) {
1156 IXGBE_CORE_UNLOCK(adapter);
1160 ifmr->ifm_status |= IFM_ACTIVE;
/* Map hardware link speed onto the ifmedia subtype. */
1162 switch (adapter->link_speed) {
1163 case IXGBE_LINK_SPEED_1GB_FULL:
1164 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1166 case IXGBE_LINK_SPEED_10GB_FULL:
/* 10G subtype depends on the detected optics (ixgbe_optics global). */
1167 ifmr->ifm_active |= ixgbe_optics | IFM_FDX;
1171 IXGBE_CORE_UNLOCK(adapter);
1176 /*********************************************************************
1178 * Media Ioctl callback
1180 * This routine is called when the user changes speed/duplex using
1181 * media/mediopt option with ifconfig.
1183 **********************************************************************/
/*
 * ifmedia change callback: only IFM_AUTO is accepted; autonegotiation is
 * advertised for both 1G and 10G full duplex.
 */
1185 ixgbe_media_change(struct ifnet * ifp)
1187 struct adapter *adapter = ifp->if_softc;
1188 struct ifmedia *ifm = &adapter->media;
1190 INIT_DEBUGOUT("ixgbe_media_change: begin");
1192 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1195 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1197 adapter->hw.mac.autoneg = TRUE;
1198 adapter->hw.phy.autoneg_advertised =
1199 IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
/* Anything other than auto is rejected (default case, presumably). */
1202 device_printf(adapter->dev, "Only auto media type\n");
1209 /*********************************************************************
1211 * This routine maps the mbufs to tx descriptors.
1212 * WARNING: while this code is using an MQ style infrastructure,
1213 * it would NOT work as is with more than 1 queue.
1215 * return 0 on success, positive on failure
1216 **********************************************************************/
/*
 * Encapsulate one mbuf chain into advanced TX descriptors on this ring.
 * Returns 0 on success, positive errno on failure (per header above).
 * May replace *m_headp if the chain has to be defragmented for DMA.
 */
1219 ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1221 struct adapter *adapter = txr->adapter;
1222 u32 olinfo_status = 0, cmd_type_len = 0;
1224 int i, j, error, nsegs;
1225 int first, last = 0;
1226 struct mbuf *m_head;
1227 bus_dma_segment_t segs[IXGBE_MAX_SCATTER];
1229 struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
1230 union ixgbe_adv_tx_desc *txd = NULL;
1235 /* Basic descriptor defines */
1236 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
1237 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
/* Ask hardware to insert the VLAN tag when the mbuf carries one. */
1239 if (m_head->m_flags & M_VLANTAG)
1240 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1243 * Force a cleanup if number of TX descriptors
1244 * available is below the threshold. If it fails
1245 * to get above, then abort transmit.
1247 if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
1249 /* Make sure things have improved */
1250 if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
1251 txr->no_tx_desc_avail++;
1257 * Important to capture the first descriptor
1258 * used because it will contain the index of
1259 * the one we tell the hardware to report back
1261 first = txr->next_avail_tx_desc;
1262 txbuf = &txr->tx_buffers[first];
1263 txbuf_mapped = txbuf;
1267 * Map the packet for DMA.
1269 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1270 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
/* EFBIG: too many segments -- defragment the chain and retry once. */
1272 if (error == EFBIG) {
1275 m = m_defrag(*m_headp, M_DONTWAIT);
1277 adapter->mbuf_alloc_failed++;
1285 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1286 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1288 if (error == ENOMEM) {
1289 adapter->no_tx_dma_setup++;
1291 } else if (error != 0) {
1292 adapter->no_tx_dma_setup++;
1297 } else if (error == ENOMEM) {
1298 adapter->no_tx_dma_setup++;
1300 } else if (error != 0) {
1301 adapter->no_tx_dma_setup++;
1307 /* Make certain there are enough descriptors */
1308 if (nsegs > txr->tx_avail - 2) {
1309 txr->no_tx_desc_avail++;
1316 ** Set the appropriate offload context
1317 ** this becomes the first descriptor of
1320 if (ixgbe_tso_setup(txr, m_head, &paylen)) {
1321 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1322 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1323 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1324 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1326 } else if (ixgbe_tx_ctx_setup(txr, m_head))
1327 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
/* Fill one data descriptor per DMA segment, wrapping at ring end. */
1329 i = txr->next_avail_tx_desc;
1330 for (j = 0; j < nsegs; j++) {
1334 txbuf = &txr->tx_buffers[i];
1335 txd = &txr->tx_base[i];
1336 seglen = segs[j].ds_len;
1337 segaddr = htole64(segs[j].ds_addr);
1339 txd->read.buffer_addr = segaddr;
1340 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1341 cmd_type_len |seglen);
1342 txd->read.olinfo_status = htole32(olinfo_status);
1343 last = i; /* Next descriptor that will get completed */
1345 if (++i == adapter->num_tx_desc)
1348 txbuf->m_head = NULL;
1350 ** we have to do this inside the loop right now
1351 ** because of the hardware workaround.
1353 if (j == (nsegs -1)) /* Last descriptor gets EOP and RS */
1354 txd->read.cmd_type_len |=
1355 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1356 #ifndef NO_82598_A0_SUPPORT
/* 82598 A0 silicon (revision_id == 0) needs a workaround here
 * (body not visible in this extraction). */
1357 if (adapter->hw.revision_id == 0)
1362 txr->tx_avail -= nsegs;
1363 txr->next_avail_tx_desc = i;
/* Ownership of the mbuf chain transfers to the ring here; it is freed
 * later by the txeof path after the hardware completes the send. */
1365 txbuf->m_head = m_head;
1367 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1369 /* Set the index of the descriptor that will be marked done */
1370 txbuf = &txr->tx_buffers[first];
1372 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1373 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1375 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1376 * hardware that this frame is available to transmit.
1378 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
/* Error path (label not visible): unload the DMA map on failure. */
1383 bus_dmamap_unload(txr->txtag, txbuf->map);
/*
 * Program FCTRL for promiscuous / all-multicast mode based on the
 * interface flags.  Note: writes the register only when a flag is set;
 * clearing is handled by ixgbe_disable_promisc() below.
 */
1389 ixgbe_set_promisc(struct adapter *adapter)
1393 struct ifnet *ifp = adapter->ifp;
1395 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1397 if (ifp->if_flags & IFF_PROMISC) {
1398 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1399 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1400 } else if (ifp->if_flags & IFF_ALLMULTI) {
1401 reg_rctl |= IXGBE_FCTRL_MPE;
1402 reg_rctl &= ~IXGBE_FCTRL_UPE;
1403 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
/*
 * Clear both unicast- and multicast-promiscuous bits in FCTRL.
 */
1409 ixgbe_disable_promisc(struct adapter * adapter)
1413 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1415 reg_rctl &= (~IXGBE_FCTRL_UPE);
1416 reg_rctl &= (~IXGBE_FCTRL_MPE);
1417 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1423 /*********************************************************************
1426 * This routine is called whenever multicast address list is updated.
1428 **********************************************************************/
1429 #define IXGBE_RAR_ENTRIES 16
/*
 * Rebuild the hardware multicast filter from the interface's multicast
 * address list, honoring IFF_PROMISC / IFF_ALLMULTI in FCTRL first.
 * NOTE(review): line 1444 unconditionally ORs UPE|MPE into fctrl before
 * the flag checks; in the upstream driver that pre-set is masked off in
 * the else branch (line 1451) -- confirm against the full source.
 */
1432 ixgbe_set_multi(struct adapter *adapter)
1435 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1437 struct ifmultiaddr *ifma;
1439 struct ifnet *ifp = adapter->ifp;
1441 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1443 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1444 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1445 if (ifp->if_flags & IFF_PROMISC)
1446 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1447 else if (ifp->if_flags & IFF_ALLMULTI) {
1448 fctrl |= IXGBE_FCTRL_MPE;
1449 fctrl &= ~IXGBE_FCTRL_UPE;
1451 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1453 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Flatten the if_multiaddrs list into the byte array mta[]; only
 * AF_LINK (link-layer) addresses are copied. */
1456 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1457 if (ifma->ifma_addr->sa_family != AF_LINK)
1459 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1460 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1461 IXGBE_ETH_LENGTH_OF_ADDRESS);
1464 IF_ADDR_UNLOCK(ifp);
/* Hand the packed array to shared code via the iterator below. */
1467 ixgbe_update_mc_addr_list(&adapter->hw,
1468 update_ptr, mcnt, ixgbe_mc_array_itr);
1474 * This is an iterator function now needed by the multicast
1475 * shared code. It simply feeds the shared code routine the
1476 * addresses in the array of ixgbe_set_multi() one by one.
/* Advances *update_ptr by one MAC address per call. */
1479 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1481 u8 *addr = *update_ptr;
1485 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1486 *update_ptr = newptr;
1491 /*********************************************************************
1494 * This routine checks for link status,updates statistics,
1495 * and runs the watchdog timer.
1497 **********************************************************************/
/*
 * Once-per-second callout (core lock held by caller): refresh link state
 * and statistics, optionally dump debug stats, run the TX watchdog, and
 * reschedule itself.
 */
1500 ixgbe_local_timer(void *arg)
1502 struct adapter *adapter = arg;
1503 struct ifnet *ifp = adapter->ifp;
1505 mtx_assert(&adapter->core_mtx, MA_OWNED);
1507 ixgbe_update_link_status(adapter);
1508 ixgbe_update_stats_counters(adapter);
1509 if (ixgbe_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1510 ixgbe_print_hw_stats(adapter);
1513 * Each second we check the watchdog
1514 * to protect against hardware hangs.
1516 ixgbe_watchdog(adapter);
1518 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/*
 * Query the MAC for the current link state and propagate transitions to
 * the network stack; on link-down, disable every TX ring's watchdog.
 */
1522 ixgbe_update_link_status(struct adapter *adapter)
1524 boolean_t link_up = FALSE;
1525 struct ifnet *ifp = adapter->ifp;
1526 struct tx_ring *txr = adapter->tx_rings;
1527 device_t dev = adapter->dev;
1529 ixgbe_check_link(&adapter->hw, &adapter->link_speed, &link_up, 0);
1532 if (adapter->link_active == FALSE) {
/* link_speed == 128 is the 10G encoding here (IXGBE_LINK_SPEED_10GB_FULL,
 * presumably) -- hence the 10-vs-1 Gbps print. */
1534 device_printf(dev,"Link is up %d Gbps %s \n",
1535 ((adapter->link_speed == 128)? 10:1),
1537 adapter->link_active = TRUE;
1538 if_link_state_change(ifp, LINK_STATE_UP);
1540 } else { /* Link down */
1541 if (adapter->link_active == TRUE) {
1543 device_printf(dev,"Link is Down\n");
1544 if_link_state_change(ifp, LINK_STATE_DOWN);
1545 adapter->link_active = FALSE;
1546 for (int i = 0; i < adapter->num_tx_queues;
1548 txr->watchdog_timer = FALSE;
1557 /*********************************************************************
1559 * This routine disables all traffic on the adapter by issuing a
1560 * global reset on the MAC and deallocates TX/RX buffers.
1562 **********************************************************************/
/*
 * Stop all traffic: disable interrupts, mark the interface down, reset
 * and stop the MAC, cancel the timer, and restore RAR[0].
 * Caller must hold the core lock (asserted below).
 */
1565 ixgbe_stop(void *arg)
1568 struct adapter *adapter = arg;
1571 mtx_assert(&adapter->core_mtx, MA_OWNED);
1573 INIT_DEBUGOUT("ixgbe_stop: begin\n");
1574 ixgbe_disable_intr(adapter);
1576 /* Tell the stack that the interface is no longer active */
1577 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1579 ixgbe_reset_hw(&adapter->hw);
/* adapter_stopped is cleared so the subsequent stop_adapter call
 * actually performs the stop sequence. */
1580 adapter->hw.adapter_stopped = FALSE;
1581 ixgbe_stop_adapter(&adapter->hw);
1582 callout_stop(&adapter->timer);
1584 /* reprogram the RAR[0] in case user changed it. */
1585 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1591 /*********************************************************************
1593 * Determine hardware revision.
1595 **********************************************************************/
/* Cache PCI identity (vendor/device/revision/subsystem IDs) in hw. */
1597 ixgbe_identify_hardware(struct adapter *adapter)
1599 device_t dev = adapter->dev;
1601 /* Save off the information about this board */
1602 adapter->hw.vendor_id = pci_get_vendor(dev);
1603 adapter->hw.device_id = pci_get_device(dev);
1604 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1605 adapter->hw.subsystem_vendor_id =
1606 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1607 adapter->hw.subsystem_device_id =
1608 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1613 /*********************************************************************
1615 * Setup the Legacy or MSI Interrupt handler
1617 **********************************************************************/
/*
 * Set up the single legacy/MSI interrupt: allocate the IRQ resource,
 * create fast-taskqueue deferred contexts for TX and RX, and register
 * ixgbe_legacy_irq as the handler.
 */
1619 ixgbe_allocate_legacy(struct adapter *adapter)
1621 device_t dev = adapter->dev;
1622 struct tx_ring *txr = adapter->tx_rings;
1623 struct rx_ring *rxr = adapter->rx_rings;
1626 /* Legacy RID at 0 */
1627 if (adapter->msix == 0)
1628 adapter->rid[0] = 0;
1630 /* We allocate a single interrupt resource */
1631 adapter->res[0] = bus_alloc_resource_any(dev,
1632 SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
1633 if (adapter->res[0] == NULL) {
1634 device_printf(dev, "Unable to allocate bus resource: "
1640 * Try allocating a fast interrupt and the associated deferred
1641 * processing contexts.
1643 TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
1644 TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
1645 txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
1646 taskqueue_thread_enqueue, &txr->tq);
1647 rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
1648 taskqueue_thread_enqueue, &rxr->tq);
1649 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
1650 device_get_nameunit(adapter->dev));
1651 taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
1652 device_get_nameunit(adapter->dev));
1653 if ((error = bus_setup_intr(dev, adapter->res[0],
1654 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
1655 adapter, &adapter->tag[0])) != 0) {
1656 device_printf(dev, "Failed to register fast interrupt "
1657 "handler: %d\n", error);
/* On handler registration failure, the taskqueues are torn down. */
1658 taskqueue_free(txr->tq);
1659 taskqueue_free(rxr->tq);
1669 /*********************************************************************
1671 * Setup MSIX Interrupt resources and handlers
1673 **********************************************************************/
/*
 * Allocate one IRQ resource + handler per TX ring, per RX ring, and one
 * for link changes; record each ring's EIMS bit and the link vector.
 */
1675 ixgbe_allocate_msix(struct adapter *adapter)
1677 device_t dev = adapter->dev;
1678 struct tx_ring *txr = adapter->tx_rings;
1679 struct rx_ring *rxr = adapter->rx_rings;
1680 int error, vector = 0;
1682 /* TX setup: the code is here for multi tx,
1683 there are other parts of the driver not ready for it */
1684 for (int i = 0; i < adapter->num_tx_queues; i++, vector++, txr++) {
1685 adapter->res[vector] = bus_alloc_resource_any(dev,
1686 SYS_RES_IRQ, &adapter->rid[vector],
1687 RF_SHAREABLE | RF_ACTIVE);
1688 if (!adapter->res[vector]) {
1689 device_printf(dev,"Unable to allocate"
1690 " bus resource: tx interrupt [%d]\n", vector);
1693 /* Set the handler function */
1694 error = bus_setup_intr(dev, adapter->res[vector],
1695 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1696 ixgbe_msix_tx, txr, &adapter->tag[vector]);
1698 adapter->res[vector] = NULL;
1699 device_printf(dev, "Failed to register TX handler");
/* Remember which EIMS bit re-arms this TX vector. */
1703 txr->eims = IXGBE_IVAR_TX_QUEUE(vector);
1707 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rxr++) {
1708 adapter->res[vector] = bus_alloc_resource_any(dev,
1709 SYS_RES_IRQ, &adapter->rid[vector],
1710 RF_SHAREABLE | RF_ACTIVE);
1711 if (!adapter->res[vector]) {
1712 device_printf(dev,"Unable to allocate"
1713 " bus resource: rx interrupt [%d],"
1714 "rid = %d\n", i, adapter->rid[vector]);
1717 /* Set the handler function */
1718 error = bus_setup_intr(dev, adapter->res[vector],
1719 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_rx,
1720 rxr, &adapter->tag[vector]);
1722 adapter->res[vector] = NULL;
1723 device_printf(dev, "Failed to register RX handler");
1727 rxr->eims = IXGBE_IVAR_RX_QUEUE(vector);
1730 /* Now for Link changes */
1731 adapter->res[vector] = bus_alloc_resource_any(dev,
1732 SYS_RES_IRQ, &adapter->rid[vector], RF_SHAREABLE | RF_ACTIVE);
1733 if (!adapter->res[vector]) {
1734 device_printf(dev,"Unable to allocate"
1735 " bus resource: Link interrupt [%d]\n", adapter->rid[vector]);
1738 /* Set the link handler function */
1739 error = bus_setup_intr(dev, adapter->res[vector],
1740 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_link,
1741 adapter, &adapter->tag[vector]);
1743 adapter->res[vector] = NULL;
1744 device_printf(dev, "Failed to register LINK handler");
1747 adapter->linkvec = vector;
1754 * Setup Either MSI/X or MSI
/*
 * Probe and configure MSI-X (falling back to MSI): map the MSI-X BAR,
 * size queue counts from available vectors and CPU count, and allocate
 * the vectors.  Return value not visible here -- presumably the number
 * of vectors in use, stored as adapter->msix by the caller.
 */
1757 ixgbe_setup_msix(struct adapter *adapter)
1759 device_t dev = adapter->dev;
1760 int rid, want, queues, msgs;
1762 /* First try MSI/X */
1763 rid = PCIR_BAR(IXGBE_MSIX_BAR);
1764 adapter->msix_mem = bus_alloc_resource_any(dev,
1765 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1766 if (!adapter->msix_mem) {
1767 /* May not be enabled */
1768 device_printf(adapter->dev,
1769 "Unable to map MSIX table \n");
1773 msgs = pci_msix_count(dev);
1774 if (msgs == 0) { /* system has msix disabled */
1775 bus_release_resource(dev, SYS_RES_MEMORY,
1776 PCIR_BAR(IXGBE_MSIX_BAR), adapter->msix_mem);
1777 adapter->msix_mem = NULL;
1781 /* Figure out a reasonable auto config value */
/* Half the non-link vectors go to TX, half to RX; cap at ncpus. */
1782 queues = (mp_ncpus > ((msgs-1)/2)) ? (msgs-1)/2 : mp_ncpus;
1784 if (ixgbe_tx_queues == 0)
1785 ixgbe_tx_queues = queues;
1786 if (ixgbe_rx_queues == 0)
1787 ixgbe_rx_queues = queues;
/* +1 vector for the link/other-cause interrupt. */
1788 want = ixgbe_tx_queues + ixgbe_rx_queues + 1;
1792 device_printf(adapter->dev,
1793 "MSIX Configuration Problem, "
1794 "%d vectors but %d queues wanted!\n",
1798 if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
1799 device_printf(adapter->dev,
1800 "Using MSIX interrupts with %d vectors\n", msgs);
1801 adapter->num_tx_queues = ixgbe_tx_queues;
1802 adapter->num_rx_queues = ixgbe_rx_queues;
/* MSI-X unavailable: fall back to a single MSI message. */
1806 msgs = pci_msi_count(dev);
1807 if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
1808 device_printf(adapter->dev,"Using MSI interrupt\n");
/*
 * Map BAR(0) registers, initialize the rid/tag/res arrays for up to
 * IXGBE_MSGS vectors, default to one TX/one RX queue, then try MSI-X.
 */
1813 ixgbe_allocate_pci_resources(struct adapter *adapter)
1816 device_t dev = adapter->dev;
1819 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1822 if (!(adapter->pci_mem)) {
1823 device_printf(dev,"Unable to allocate bus resource: memory\n");
1827 adapter->osdep.mem_bus_space_tag =
1828 rman_get_bustag(adapter->pci_mem);
1829 adapter->osdep.mem_bus_space_handle =
1830 rman_get_bushandle(adapter->pci_mem);
/* Shared code accesses registers through hw_addr; it points at the
 * bus-space handle rather than a direct mapping. */
1831 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
1834 * Init the resource arrays
1836 for (int i = 0; i < IXGBE_MSGS; i++) {
1837 adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
1838 adapter->tag[i] = NULL;
1839 adapter->res[i] = NULL;
1842 /* Legacy defaults */
1843 adapter->num_tx_queues = 1;
1844 adapter->num_rx_queues = 1;
1846 /* Now setup MSI or MSI/X */
1847 adapter->msix = ixgbe_setup_msix(adapter);
1849 adapter->hw.back = &adapter->osdep;
/*
 * Release everything allocated above: per-vector handlers and IRQ
 * resources, MSI state, the MSI-X BAR, and the register BAR.
 */
1854 ixgbe_free_pci_resources(struct adapter * adapter)
1856 device_t dev = adapter->dev;
1859 * Legacy has this set to 0, but we need
1860 * to run this once, so reset it.
1862 if (adapter->msix == 0)
1866 * First release all the interrupt resources:
1867 * notice that since these are just kept
1868 * in an array we can do the same logic
1869 * whether its MSIX or just legacy.
1871 for (int i = 0; i < adapter->msix; i++) {
1872 if (adapter->tag[i] != NULL) {
1873 bus_teardown_intr(dev, adapter->res[i],
1875 adapter->tag[i] = NULL;
1877 if (adapter->res[i] != NULL) {
1878 bus_release_resource(dev, SYS_RES_IRQ,
1879 adapter->rid[i], adapter->res[i]);
1884 pci_release_msi(dev);
1886 if (adapter->msix_mem != NULL)
1887 bus_release_resource(dev, SYS_RES_MEMORY,
1888 PCIR_BAR(IXGBE_MSIX_BAR), adapter->msix_mem);
1890 if (adapter->pci_mem != NULL)
1891 bus_release_resource(dev, SYS_RES_MEMORY,
1892 PCIR_BAR(0), adapter->pci_mem);
1897 /*********************************************************************
1899 * Initialize the hardware to a configuration as specified by the
1900 * adapter structure. The controller is reset, the EEPROM is
1901 * verified, the MAC address is set, then the shared initialization
1902 * routines are called.
1904 **********************************************************************/
/*
 * Bring the MAC to a known state: stop the adapter, validate the EEPROM
 * checksum, set default flow-control parameters, and run shared-code init.
 */
1906 ixgbe_hardware_init(struct adapter *adapter)
1908 device_t dev = adapter->dev;
1912 /* Issue a global reset */
1913 adapter->hw.adapter_stopped = FALSE;
1914 ixgbe_stop_adapter(&adapter->hw);
1916 /* Make sure we have a good EEPROM before we read from it */
1917 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
1918 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
1922 /* Get Hardware Flow Control setting */
/* Defaults: full (symmetric) flow control with driver-chosen watermarks. */
1923 adapter->hw.fc.type = ixgbe_fc_full;
1924 adapter->hw.fc.pause_time = IXGBE_FC_PAUSE;
1925 adapter->hw.fc.low_water = IXGBE_FC_LO;
1926 adapter->hw.fc.high_water = IXGBE_FC_HI;
1927 adapter->hw.fc.send_xon = TRUE;
1929 if (ixgbe_init_hw(&adapter->hw)) {
1930 device_printf(dev,"Hardware Initialization Failed");
1937 /*********************************************************************
1939 * Setup networking device structure and register an interface.
1941 **********************************************************************/
/*
 * Allocate and configure the ifnet: entry points, capabilities
 * (checksum/TSO/VLAN/jumbo), link speed setup, and ifmedia registration.
 * 1G media types are only added for the AT (copper) device IDs.
 */
1943 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
1946 struct ixgbe_hw *hw = &adapter->hw;
1947 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1949 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1951 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1952 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1953 ifp->if_mtu = ETHERMTU;
1954 ifp->if_baudrate = 1000000000;
1955 ifp->if_init = ixgbe_init;
1956 ifp->if_softc = adapter;
1957 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1958 ifp->if_ioctl = ixgbe_ioctl;
1959 ifp->if_start = ixgbe_start;
/* Per-ring watchdog replaces the stack's if_watchdog mechanism. */
1961 ifp->if_watchdog = NULL;
1962 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1964 ether_ifattach(ifp, adapter->hw.mac.addr);
1966 adapter->max_frame_size =
1967 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1970 * Tell the upper layer(s) we support long frames.
1972 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1974 ifp->if_capabilities |= (IFCAP_HWCSUM | IFCAP_TSO4);
1975 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1976 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1978 ifp->if_capenable = ifp->if_capabilities;
1980 if ((hw->device_id == IXGBE_DEV_ID_82598AT) ||
1981 (hw->device_id == IXGBE_DEV_ID_82598AT_DUAL_PORT))
1982 ixgbe_setup_link_speed(hw, (IXGBE_LINK_SPEED_10GB_FULL |
1983 IXGBE_LINK_SPEED_1GB_FULL), TRUE, TRUE);
1985 ixgbe_setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL,
1989 * Specify the media types supported by this adapter and register
1990 * callbacks to update media and link information
1992 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1993 ixgbe_media_status);
1994 ifmedia_add(&adapter->media, IFM_ETHER | ixgbe_optics |
1996 if ((hw->device_id == IXGBE_DEV_ID_82598AT) ||
1997 (hw->device_id == IXGBE_DEV_ID_82598AT_DUAL_PORT)) {
1998 ifmedia_add(&adapter->media,
1999 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2000 ifmedia_add(&adapter->media,
2001 IFM_ETHER | IFM_1000_T, 0, NULL);
2003 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2004 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2009 /********************************************************************
2010 * Manage DMA'able memory.
2011 *******************************************************************/
/* busdma load callback: stash the single segment's bus address in *arg. */
2013 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
2017 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * Allocate a page-aligned DMA region of `size` bytes: create tag,
 * allocate memory, load the map (address captured by ixgbe_dmamap_cb).
 * On failure, tears down whatever was created and NULLs the handles.
 */
2022 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2023 struct ixgbe_dma_alloc *dma, int mapflags)
2025 device_t dev = adapter->dev;
2028 r = bus_dma_tag_create(NULL, /* parent */
2029 PAGE_SIZE, 0, /* alignment, bounds */
2030 BUS_SPACE_MAXADDR, /* lowaddr */
2031 BUS_SPACE_MAXADDR, /* highaddr */
2032 NULL, NULL, /* filter, filterarg */
2035 size, /* maxsegsize */
2036 BUS_DMA_ALLOCNOW, /* flags */
2037 NULL, /* lockfunc */
2038 NULL, /* lockfuncarg */
2041 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2045 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2046 BUS_DMA_NOWAIT, &dma->dma_map);
2048 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2052 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2056 mapflags | BUS_DMA_NOWAIT);
2058 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2062 dma->dma_size = size;
/* Error unwind (labels not visible in this extraction). */
2065 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2067 bus_dma_tag_destroy(dma->dma_tag);
2069 dma->dma_map = NULL;
2070 dma->dma_tag = NULL;
/* Release a region allocated by ixgbe_dma_malloc(): sync, unload,
 * free the memory, and destroy the tag. */
2075 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2077 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2078 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2079 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2080 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2081 bus_dma_tag_destroy(dma->dma_tag);
2085 /*********************************************************************
2087 * Allocate memory for the transmit and receive rings, and then
2088 * the descriptors associated with each, called only once at attach.
2090 **********************************************************************/
/*
 * One-time (attach) allocation of all TX/RX ring structures, their
 * descriptor DMA memory, ring locks, and packet buffers.  txconf/rxconf
 * track how far setup got so the error path can unwind partial work.
 */
2092 ixgbe_allocate_queues(struct adapter *adapter)
2094 device_t dev = adapter->dev;
2095 struct tx_ring *txr;
2096 struct rx_ring *rxr;
2097 int rsize, tsize, error = IXGBE_SUCCESS;
2098 char name_string[16];
2099 int txconf = 0, rxconf = 0;
2101 /* First allocate the TX ring struct memory */
2102 if (!(adapter->tx_rings =
2103 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2104 adapter->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2105 device_printf(dev, "Unable to allocate TX ring memory\n");
2109 txr = adapter->tx_rings;
2111 /* Next allocate the RX */
2112 if (!(adapter->rx_rings =
2113 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2114 adapter->num_rx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2115 device_printf(dev, "Unable to allocate RX ring memory\n");
2119 rxr = adapter->rx_rings;
2121 /* For the ring itself */
/* Descriptor rings are rounded up to a 4KB multiple (hardware
 * alignment requirement, presumably). */
2122 tsize = roundup2(adapter->num_tx_desc *
2123 sizeof(union ixgbe_adv_tx_desc), 4096);
2126 * Now set up the TX queues, txconf is needed to handle the
2127 * possibility that things fail midcourse and we need to
2128 * undo memory gracefully
2130 for (int i = 0; i < adapter->num_tx_queues; i++, txconf++) {
2131 /* Set up some basics */
2132 txr = &adapter->tx_rings[i];
2133 txr->adapter = adapter;
2136 /* Initialize the TX side lock */
2137 snprintf(name_string, sizeof(name_string), "%s:tx(%d)",
2138 device_get_nameunit(dev), txr->me);
2139 mtx_init(&txr->tx_mtx, name_string, NULL, MTX_DEF);
2141 if (ixgbe_dma_malloc(adapter, tsize,
2142 &txr->txdma, BUS_DMA_NOWAIT)) {
2144 "Unable to allocate TX Descriptor memory\n");
2148 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2149 bzero((void *)txr->tx_base, tsize);
2151 /* Now allocate transmit buffers for the ring */
2152 if (ixgbe_allocate_transmit_buffers(txr)) {
2154 "Critical Failure setting up transmit buffers\n");
2162 * Next the RX queues...
2164 rsize = roundup2(adapter->num_rx_desc *
2165 sizeof(union ixgbe_adv_rx_desc), 4096);
2166 for (int i = 0; i < adapter->num_rx_queues; i++, rxconf++) {
2167 rxr = &adapter->rx_rings[i];
2168 /* Set up some basics */
2169 rxr->adapter = adapter;
2172 /* Initialize the TX side lock */
/* NOTE(review): comment above says "TX" but this is the RX lock. */
2173 snprintf(name_string, sizeof(name_string), "%s:rx(%d)",
2174 device_get_nameunit(dev), rxr->me);
2175 mtx_init(&rxr->rx_mtx, name_string, NULL, MTX_DEF);
2177 if (ixgbe_dma_malloc(adapter, rsize,
2178 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2180 "Unable to allocate RxDescriptor memory\n");
2184 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2185 bzero((void *)rxr->rx_base, rsize);
2187 /* Allocate receive buffers for the ring*/
2188 if (ixgbe_allocate_receive_buffers(rxr)) {
2190 "Critical Failure setting up receive buffers\n");
/* Error unwind: free DMA for every ring that got as far as
 * ixgbe_dma_malloc(), then the ring arrays themselves. */
2199 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2200 ixgbe_dma_free(adapter, &rxr->rxdma);
2202 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2203 ixgbe_dma_free(adapter, &txr->txdma);
2204 free(adapter->rx_rings, M_DEVBUF);
2206 free(adapter->tx_rings, M_DEVBUF);
2211 /*********************************************************************
2213 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2214 * the information needed to transmit a packet on the wire. This is
2215 * called only once at attach, setup is done every reset.
2217 **********************************************************************/
/*
 * Attach-time allocation for one TX ring: create the DMA tag used for
 * packet mappings, allocate the tx_buffer array, and create one DMA map
 * per descriptor.  On failure, frees all TX structures.
 */
2219 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2221 struct adapter *adapter = txr->adapter;
2222 device_t dev = adapter->dev;
2223 struct ixgbe_tx_buf *txbuf;
2227 * Setup DMA descriptor areas.
2229 if ((error = bus_dma_tag_create(NULL, /* parent */
2230 PAGE_SIZE, 0, /* alignment, bounds */
2231 BUS_SPACE_MAXADDR, /* lowaddr */
2232 BUS_SPACE_MAXADDR, /* highaddr */
2233 NULL, NULL, /* filter, filterarg */
2234 IXGBE_TSO_SIZE, /* maxsize */
2235 IXGBE_MAX_SCATTER, /* nsegments */
2236 PAGE_SIZE, /* maxsegsize */
2238 NULL, /* lockfunc */
2239 NULL, /* lockfuncarg */
2241 device_printf(dev,"Unable to allocate TX DMA tag\n");
2245 if (!(txr->tx_buffers =
2246 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
2247 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2248 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2253 /* Create the descriptor buffer dma maps */
2254 txbuf = txr->tx_buffers;
2255 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2256 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2258 device_printf(dev, "Unable to create TX DMA map\n");
2265 /* We free all, it handles case where we are in the middle */
2266 ixgbe_free_transmit_structures(adapter);
2270 /*********************************************************************
2272 * Initialize a transmit ring.
2274 **********************************************************************/
/*
 * Per-init ring reset: zero the descriptor ring, reset indices, free any
 * stale mbufs still attached to buffers, and restore full availability.
 */
2276 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2278 struct adapter *adapter = txr->adapter;
2279 struct ixgbe_tx_buf *txbuf;
2282 /* Clear the old ring contents */
2283 bzero((void *)txr->tx_base,
2284 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
2286 txr->next_avail_tx_desc = 0;
2287 txr->next_tx_to_clean = 0;
2289 /* Free any existing tx buffers. */
2290 txbuf = txr->tx_buffers;
2291 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2292 if (txbuf->m_head != NULL) {
2293 bus_dmamap_sync(txr->txtag, txbuf->map,
2294 BUS_DMASYNC_POSTWRITE);
2295 bus_dmamap_unload(txr->txtag, txbuf->map);
2296 m_freem(txbuf->m_head);
2297 txbuf->m_head = NULL;
2301 /* Set number of descriptors available */
2302 txr->tx_avail = adapter->num_tx_desc;
2304 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2305 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2309 /*********************************************************************
2311 * Initialize all transmit rings.
2313 **********************************************************************/
/* Run ixgbe_setup_transmit_ring() over every TX queue. */
2315 ixgbe_setup_transmit_structures(struct adapter *adapter)
2317 struct tx_ring *txr = adapter->tx_rings;
2319 for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
2320 ixgbe_setup_transmit_ring(txr);
2325 /*********************************************************************
2327 * Enable transmit unit.
2329 **********************************************************************/
/*
 * Program per-queue TX registers: ring base/length, head write-back
 * address (TDWBAL/TDWBAH), head/tail pointers, and default cmd bits.
 */
2331 ixgbe_initialize_transmit_units(struct adapter *adapter)
2333 struct tx_ring *txr = adapter->tx_rings;
2334 struct ixgbe_hw *hw = &adapter->hw;
2336 /* Setup the Base and Length of the Tx Descriptor Ring */
2338 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
2339 u64 txhwb = 0, tdba = txr->txdma.dma_paddr;
2342 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2343 (tdba & 0x00000000ffffffffULL));
2344 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2345 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2346 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2348 /* Setup for Head WriteBack */
/* NOTE(review): vtophys() on a kernel virtual address rather than a
 * busdma-loaded address -- works only when the target is physically
 * contiguous and not remapped (e.g. no IOMMU); confirm upstream. */
2349 txhwb = (u64)vtophys(&txr->tx_hwb);
2350 txhwb |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2351 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(i),
2352 (txhwb & 0x00000000ffffffffULL));
2353 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(i),
/* Head write-back requires relaxed-ordering disabled in DCA_TXCTRL. */
2355 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2356 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2357 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2359 /* Setup the HW Tx Head and Tail descriptor pointers */
2360 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2361 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2363 /* Setup Transmit Descriptor Cmd Settings */
2364 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2366 txr->watchdog_timer = 0;
2372 /*********************************************************************
2374 * Free all transmit rings.
2376 **********************************************************************/
/*
 * ixgbe_free_transmit_structures:
 *   Tear down all TX rings: free each ring's buffers, release its
 *   descriptor DMA area, drop and destroy its lock, then free the
 *   tx_rings array itself.
 * NOTE(review): the matching IXGBE_TX_LOCK acquisition for the
 * IXGBE_TX_UNLOCK below is not visible in this extract -- it is
 * presumably on a dropped line; verify in the original source.
 */
2378 ixgbe_free_transmit_structures(struct adapter *adapter)
2380 struct tx_ring *txr = adapter->tx_rings;
2382 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
2384 ixgbe_free_transmit_buffers(txr);
2385 ixgbe_dma_free(adapter, &txr->txdma);
2386 IXGBE_TX_UNLOCK(txr);
2387 IXGBE_TX_LOCK_DESTROY(txr);
2389 free(adapter->tx_rings, M_DEVBUF);
2392 /*********************************************************************
2394 * Free transmit ring related data structures.
2396 **********************************************************************/
/*
 * ixgbe_free_transmit_buffers:
 *   Release per-descriptor TX resources for one ring: sync/unload each
 *   loaded DMA map, free any attached mbuf chain, destroy the maps,
 *   then free the tx_buffers array and the ring's DMA tag.
 * NOTE(review): continuation arguments of several bus_dmamap_* calls
 * and some braces are missing from this extract.
 */
2398 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2400 struct adapter *adapter = txr->adapter;
2401 struct ixgbe_tx_buf *tx_buffer;
2404 INIT_DEBUGOUT("free_transmit_ring: begin");
2406 if (txr->tx_buffers == NULL)
2409 tx_buffer = txr->tx_buffers;
2410 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2411 if (tx_buffer->m_head != NULL) {
2412 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2413 BUS_DMASYNC_POSTWRITE);
2414 bus_dmamap_unload(txr->txtag,
2416 m_freem(tx_buffer->m_head);
2417 tx_buffer->m_head = NULL;
2418 if (tx_buffer->map != NULL) {
2419 bus_dmamap_destroy(txr->txtag,
2421 tx_buffer->map = NULL;
2423 } else if (tx_buffer->map != NULL) {
2424 bus_dmamap_unload(txr->txtag,
2426 bus_dmamap_destroy(txr->txtag,
2428 tx_buffer->map = NULL;
2432 if (txr->tx_buffers != NULL) {
2433 free(txr->tx_buffers, M_DEVBUF);
2434 txr->tx_buffers = NULL;
2436 if (txr->txtag != NULL) {
2437 bus_dma_tag_destroy(txr->txtag);
2443 /*********************************************************************
2445 * Advanced Context Descriptor setup for VLAN or CSUM
2447 **********************************************************************/
/*
 * ixgbe_tx_ctx_setup:
 *   Build an advanced context descriptor for VLAN tag insertion and/or
 *   L3/L4 checksum offload for the given mbuf.  Parses the Ethernet
 *   header (handling an 802.1Q encapsulation for QinQ), then the IPv4
 *   or IPv6 header, and encodes the lengths and TUCMD type bits into
 *   the descriptor.  Consumes one descriptor and advances
 *   next_avail_tx_desc.  Returns FALSE when no context descriptor is
 *   needed or when the headers are not contiguous in the first mbuf.
 * NOTE(review): this extract is missing lines, including the switch
 * statement on 'etype', the declarations of ip/vtag/ipproto, and the
 * A0-revision descriptor quirk body -- consult the original source.
 */
2450 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2452 struct adapter *adapter = txr->adapter;
2453 struct ixgbe_adv_tx_context_desc *TXD;
2454 struct ixgbe_tx_buf *tx_buffer;
2455 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2456 struct ether_vlan_header *eh;
2458 struct ip6_hdr *ip6;
2459 int ehdrlen, ip_hlen = 0;
2462 bool offload = TRUE;
2463 int ctxd = txr->next_avail_tx_desc;
2464 #if __FreeBSD_version < 700000
2471 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
2474 tx_buffer = &txr->tx_buffers[ctxd];
2475 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2478 ** In advanced descriptors the vlan tag must
2479 ** be placed into the descriptor itself.
2481 #if __FreeBSD_version < 700000
2482 mtag = VLAN_OUTPUT_TAG(ifp, mp);
2485 htole16(VLAN_TAG_VALUE(mtag)) << IXGBE_ADVTXD_VLAN_SHIFT;
2486 } else if (offload == FALSE)
2487 return FALSE; /* No need for CTX */
2489 if (mp->m_flags & M_VLANTAG) {
2490 vtag = htole16(mp->m_pkthdr.ether_vtag);
2491 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2492 } else if (offload == FALSE)
2496 * Determine where frame payload starts.
2497 * Jump over vlan headers if already present,
2498 * helpful for QinQ too.
2500 eh = mtod(mp, struct ether_vlan_header *);
2501 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2502 etype = ntohs(eh->evl_proto);
2503 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2505 etype = ntohs(eh->evl_encap_proto);
2506 ehdrlen = ETHER_HDR_LEN;
2509 /* Set the ether header length */
2510 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2514 ip = (struct ip *)(mp->m_data + ehdrlen);
2515 ip_hlen = ip->ip_hl << 2;
2516 if (mp->m_len < ehdrlen + ip_hlen)
2517 return FALSE; /* failure */
2519 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2521 case ETHERTYPE_IPV6:
2522 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2523 ip_hlen = sizeof(struct ip6_hdr);
2524 if (mp->m_len < ehdrlen + ip_hlen)
2525 return FALSE; /* failure */
2526 ipproto = ip6->ip6_nxt;
2527 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2534 vlan_macip_lens |= ip_hlen;
2535 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2539 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
2540 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2543 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
2544 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2551 /* Now copy bits into descriptor */
2552 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2553 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2554 TXD->seqnum_seed = htole32(0);
2555 TXD->mss_l4len_idx = htole32(0);
2557 #ifndef NO_82598_A0_SUPPORT
2558 if (adapter->hw.revision_id == 0)
2562 tx_buffer->m_head = NULL;
2564 /* We've consumed the first desc, adjust counters */
2565 if (++ctxd == adapter->num_tx_desc)
2567 txr->next_avail_tx_desc = ctxd;
2573 #if __FreeBSD_version >= 700000
2574 /**********************************************************************
2576 * Setup work for hardware segmentation offload (TSO) on
2577 * adapters using advanced tx descriptors
2579 **********************************************************************/
/*
 * ixgbe_tso_setup:
 *   Build the advanced context descriptor for hardware TCP
 *   segmentation offload (TSO).  Locates the IPv4/TCP headers,
 *   pre-computes the pseudo-header checksum (in_pseudo), reports the
 *   payload length back through *paylen, and encodes VLAN/MACLEN/IPLEN
 *   plus MSS and L4 length into the descriptor.  Consumes one
 *   descriptor.  Returns FALSE when the packet is not a TSO candidate
 *   or is not TCP.
 * NOTE(review): missing from this extract: declarations of ip/th/vtag,
 * several braces/else arms, and the A0-revision quirk body.
 */
2581 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2583 struct adapter *adapter = txr->adapter;
2584 struct ixgbe_adv_tx_context_desc *TXD;
2585 struct ixgbe_tx_buf *tx_buffer;
2586 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2587 u32 mss_l4len_idx = 0;
2589 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
2590 struct ether_vlan_header *eh;
2594 if (((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
2595 (mp->m_pkthdr.len <= IXGBE_TX_BUFFER_SIZE))
2599 * Determine where frame payload starts.
2600 * Jump over vlan headers if already present
2602 eh = mtod(mp, struct ether_vlan_header *);
2603 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2604 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2606 ehdrlen = ETHER_HDR_LEN;
2608 /* Ensure we have at least the IP+TCP header in the first mbuf. */
2609 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2612 ctxd = txr->next_avail_tx_desc;
2613 tx_buffer = &txr->tx_buffers[ctxd];
2614 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2616 ip = (struct ip *)(mp->m_data + ehdrlen);
2617 if (ip->ip_p != IPPROTO_TCP)
2618 return FALSE; /* 0 */
2621 ip_hlen = ip->ip_hl << 2;
2622 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2623 th->th_sum = in_pseudo(ip->ip_src.s_addr,
2624 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2625 tcp_hlen = th->th_off << 2;
2626 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2627 /* This is used in the transmit desc in encap */
2628 *paylen = mp->m_pkthdr.len - hdrlen;
2630 /* VLAN MACLEN IPLEN */
2631 if (mp->m_flags & M_VLANTAG) {
2632 vtag = htole16(mp->m_pkthdr.ether_vtag);
2633 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2636 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2637 vlan_macip_lens |= ip_hlen;
2638 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2640 /* ADV DTYPE TUCMD */
2641 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2642 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2643 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2644 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2648 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2649 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2650 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2652 TXD->seqnum_seed = htole32(0);
2653 tx_buffer->m_head = NULL;
2655 #ifndef NO_82598_A0_SUPPORT
2656 if (adapter->hw.revision_id == 0)
2660 if (++ctxd == adapter->num_tx_desc)
2664 txr->next_avail_tx_desc = ctxd;
2668 #else /* For 6.2 RELEASE */
2669 /* This makes it easy to keep the code common */
/*
 * Stub for FreeBSD < 7.0 where TSO is not supported: presumably
 * returns FALSE unconditionally.  NOTE(review): the stub body is
 * missing from this extract -- restore from the original source.
 */
2671 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2677 /**********************************************************************
2679 * Examine each tx_buffer in the used queue. If the hardware is done
2680 * processing the packet then free associated resources. The
2681 * tx_buffer is put back on the free queue.
2683 **********************************************************************/
/*
 * ixgbe_txeof:
 *   TX completion cleanup.  Walks the ring from next_tx_to_clean up to
 *   the hardware's head write-back point ('done'), zeroing completed
 *   descriptors and freeing their mbufs/DMA maps, then returns the
 *   reclaimed slots to tx_avail.  Clears IFF_DRV_OACTIVE and stops or
 *   restarts the watchdog depending on how many descriptors remain in
 *   flight.  Caller must hold the TX lock (see mtx_assert below).
 * NOTE(review): lines computing 'done' (the head write-back read),
 * declaring 'cleaned', and several braces/continuations are missing
 * from this extract.
 */
2685 ixgbe_txeof(struct tx_ring *txr)
2687 struct adapter * adapter = txr->adapter;
2688 struct ifnet *ifp = adapter->ifp;
2689 u32 first, last, done, num_avail;
2691 struct ixgbe_tx_buf *tx_buffer;
2692 struct ixgbe_legacy_tx_desc *tx_desc;
2694 mtx_assert(&txr->mtx, MA_OWNED);
2696 if (txr->tx_avail == adapter->num_tx_desc)
2699 num_avail = txr->tx_avail;
2700 first = txr->next_tx_to_clean;
2702 tx_buffer = &txr->tx_buffers[first];
2703 /* For cleanup we just use legacy struct */
2704 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2710 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2711 BUS_DMASYNC_POSTREAD);
2714 /* We clean the range until the last head write back */
2715 while (first != done) {
2716 tx_desc->upper.data = 0;
2717 tx_desc->lower.data = 0;
2718 tx_desc->buffer_addr = 0;
2719 num_avail++; cleaned++;
2721 if (tx_buffer->m_head) {
2723 bus_dmamap_sync(txr->txtag,
2725 BUS_DMASYNC_POSTWRITE);
2726 bus_dmamap_unload(txr->txtag,
2728 m_freem(tx_buffer->m_head);
2729 tx_buffer->m_head = NULL;
2730 tx_buffer->map = NULL;
2733 if (++first == adapter->num_tx_desc)
2736 tx_buffer = &txr->tx_buffers[first];
2738 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2740 /* See if there is more work now */
2747 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2748 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2750 txr->next_tx_to_clean = first;
2753 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
2754 * it is OK to send packets. If there are no pending descriptors,
2755 * clear the timeout. Otherwise, if some descriptors have been freed,
2756 * restart the timeout.
2758 if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
2759 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2760 /* If all are clean turn off the timer */
2761 if (num_avail == adapter->num_tx_desc) {
2762 txr->watchdog_timer = 0;
2763 txr->tx_avail = num_avail;
2768 /* Some were cleaned, so reset timer */
2770 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
2771 txr->tx_avail = num_avail;
2775 /*********************************************************************
2777 * Get a buffer from system mbuf buffer pool.
2779 **********************************************************************/
/*
 * ixgbe_get_buf:
 *   Allocate and DMA-map a fresh receive mbuf cluster for descriptor
 *   slot i.  Chooses a 2K (MCLBYTES) or page-size jumbo cluster based
 *   on adapter->bigbufs, loads it through the ring's spare map, swaps
 *   the spare map with the slot's old map, and writes the new physical
 *   address into the RX descriptor.
 * NOTE(review): missing from this extract: declarations of mp/map, the
 * allocation-failure branch, error handling after the DMA load, and
 * the return statement.
 */
2781 ixgbe_get_buf(struct rx_ring *rxr, int i)
2783 struct adapter *adapter = rxr->adapter;
2786 int nsegs, error, old, s = 0;
2787 int size = MCLBYTES;
2790 bus_dma_segment_t segs[1];
2791 struct ixgbe_rx_buf *rxbuf;
2793 /* Are we going to Jumbo clusters? */
2794 if (adapter->bigbufs) {
2795 size = MJUMPAGESIZE;
2799 mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
2801 adapter->mbuf_alloc_failed++;
2805 mp->m_len = mp->m_pkthdr.len = size;
2807 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2808 m_adj(mp, ETHER_ALIGN);
2811 * Using memory from the mbuf cluster pool, invoke the bus_dma
2812 * machinery to arrange the memory mapping.
2814 error = bus_dmamap_load_mbuf_sg(rxr->rxtag[s], rxr->spare_map[s],
2815 mp, segs, &nsegs, BUS_DMA_NOWAIT);
2821 /* Now check our target buffer for existing mapping */
2822 rxbuf = &rxr->rx_buffers[i];
2823 old = rxbuf->bigbuf;
2824 if (rxbuf->m_head != NULL)
2825 bus_dmamap_unload(rxr->rxtag[old], rxbuf->map[old]);
2827 map = rxbuf->map[old];
2828 rxbuf->map[s] = rxr->spare_map[s];
2829 rxr->spare_map[old] = map;
2830 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s], BUS_DMASYNC_PREREAD);
2834 rxr->rx_base[i].read.pkt_addr = htole64(segs[0].ds_addr);
2836 #ifndef NO_82598_A0_SUPPORT
2837 /* A0 silicon needs descriptors stored one's-complemented */
2838 if (adapter->hw.revision_id == 0) {
2839 struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
2842 d = (struct dhack *)&rxr->rx_base[i];
2851 /*********************************************************************
2853 * Allocate memory for rx_buffer structures. Since we use one
2854 * rx_buffer per received packet, the maximum number of rx_buffer's
2855 * that we'll need is equal to the number of receive descriptors
2856 * that we've allocated.
2858 **********************************************************************/
/*
 * ixgbe_allocate_receive_buffers:
 *   Allocate the rx_buffers array and the two DMA tags used by this
 *   ring -- rxtag[0] for standard 2K clusters and rxtag[1] for
 *   page-size jumbo clusters -- plus one spare map per tag and a pair
 *   of maps per descriptor.  On any failure, falls through to
 *   ixgbe_free_receive_structures() which tolerates partial setup.
 * NOTE(review): several bus_dma_tag_create() arguments (nsegments,
 * flags, the &rxr->rxtag[n] out-parameter), error-goto lines and
 * braces are missing from this extract.
 */
2860 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2862 struct adapter *adapter = rxr->adapter;
2863 device_t dev = adapter->dev;
2864 struct ixgbe_rx_buf *rxbuf;
2865 int i, bsize, error;
2867 bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
2868 if (!(rxr->rx_buffers =
2869 (struct ixgbe_rx_buf *) malloc(bsize,
2870 M_DEVBUF, M_NOWAIT | M_ZERO))) {
2871 device_printf(dev, "Unable to allocate rx_buffer memory\n");
2876 /* First make the small (2K) tag/map */
2877 if ((error = bus_dma_tag_create(NULL, /* parent */
2878 PAGE_SIZE, 0, /* alignment, bounds */
2879 BUS_SPACE_MAXADDR, /* lowaddr */
2880 BUS_SPACE_MAXADDR, /* highaddr */
2881 NULL, NULL, /* filter, filterarg */
2882 MCLBYTES, /* maxsize */
2884 MCLBYTES, /* maxsegsize */
2886 NULL, /* lockfunc */
2887 NULL, /* lockfuncarg */
2889 device_printf(dev, "Unable to create RX Small DMA tag\n");
2893 /* Next make the large (4K) tag/map */
2894 if ((error = bus_dma_tag_create(NULL, /* parent */
2895 PAGE_SIZE, 0, /* alignment, bounds */
2896 BUS_SPACE_MAXADDR, /* lowaddr */
2897 BUS_SPACE_MAXADDR, /* highaddr */
2898 NULL, NULL, /* filter, filterarg */
2899 MJUMPAGESIZE, /* maxsize */
2901 MJUMPAGESIZE, /* maxsegsize */
2903 NULL, /* lockfunc */
2904 NULL, /* lockfuncarg */
2906 device_printf(dev, "Unable to create RX Large DMA tag\n");
2910 /* Create the spare maps (used by getbuf) */
2911 error = bus_dmamap_create(rxr->rxtag[0], BUS_DMA_NOWAIT,
2912 &rxr->spare_map[0]);
2913 error = bus_dmamap_create(rxr->rxtag[1], BUS_DMA_NOWAIT,
2914 &rxr->spare_map[1]);
2916 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
2921 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
2922 rxbuf = &rxr->rx_buffers[i];
2923 error = bus_dmamap_create(rxr->rxtag[0],
2924 BUS_DMA_NOWAIT, &rxbuf->map[0]);
2926 device_printf(dev, "Unable to create Small RX DMA map\n");
2929 error = bus_dmamap_create(rxr->rxtag[1],
2930 BUS_DMA_NOWAIT, &rxbuf->map[1]);
2932 device_printf(dev, "Unable to create Large RX DMA map\n");
2940 /* Frees all, but can handle partial completion */
2941 ixgbe_free_receive_structures(adapter);
2945 /*********************************************************************
2947 * Initialize a receive ring and its buffers.
2949 **********************************************************************/
/*
 * ixgbe_setup_receive_ring:
 *   (Re)initialize one RX ring: zero the descriptor area, free any
 *   previously loaded buffers, populate every slot with a fresh mbuf
 *   via ixgbe_get_buf(), reset the ring indices, sync the descriptor
 *   DMA map, and initialize software LRO when enabled.  On an
 *   allocation failure, unwinds the buffers allocated so far.
 * NOTE(review): missing from this extract: the per-slot selection of
 * 's' in the free loop, the 'fail:' label, braces, and the device_t
 * 'dev' declaration used by the LRO printfs.  Also note the unwind
 * loop reads 'for (--j; j < 0; j--)' -- presumably the original
 * condition counts down to zero; verify against the original source.
 */
2951 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2953 struct adapter *adapter;
2955 struct ixgbe_rx_buf *rxbuf;
2956 struct lro_ctrl *lro = &rxr->lro;
2957 int j, rsize, s = 0;
2959 adapter = rxr->adapter;
2961 rsize = roundup2(adapter->num_rx_desc *
2962 sizeof(union ixgbe_adv_rx_desc), 4096);
2963 /* Clear the ring contents */
2964 bzero((void *)rxr->rx_base, rsize);
2967 ** Free current RX buffers: the size buffer
2968 ** that is loaded is indicated by the buffer
2971 for (int i = 0; i < adapter->num_rx_desc; i++) {
2972 rxbuf = &rxr->rx_buffers[i];
2974 if (rxbuf->m_head != NULL) {
2975 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
2976 BUS_DMASYNC_POSTREAD);
2977 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2978 m_freem(rxbuf->m_head);
2979 rxbuf->m_head = NULL;
2983 for (j = 0; j < adapter->num_rx_desc; j++) {
2984 if (ixgbe_get_buf(rxr, j) == ENOBUFS) {
2985 rxr->rx_buffers[j].m_head = NULL;
2986 rxr->rx_base[j].read.pkt_addr = 0;
2987 /* If we fail some may have change size */
2988 s = adapter->bigbufs;
2993 /* Setup our descriptor indices */
2994 rxr->next_to_check = 0;
2995 rxr->last_cleaned = 0;
2997 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2998 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3000 /* Now set up the LRO interface */
3001 if (ixgbe_enable_lro) {
3002 int err = tcp_lro_init(lro);
3004 device_printf(dev,"LRO Initialization failed!\n");
3007 device_printf(dev,"RX LRO Initialized\n");
3008 lro->ifp = adapter->ifp;
3015 * We need to clean up any buffers allocated so far
3016 * 'j' is the failing index, decrement it to get the
3019 for (--j; j < 0; j--) {
3020 rxbuf = &rxr->rx_buffers[j];
3021 if (rxbuf->m_head != NULL) {
3022 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
3023 BUS_DMASYNC_POSTREAD);
3024 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
3025 m_freem(rxbuf->m_head);
3026 rxbuf->m_head = NULL;
3032 /*********************************************************************
3034 * Initialize all receive rings.
3036 **********************************************************************/
/*
 * ixgbe_setup_receive_structures:
 *   Initialize every RX ring via ixgbe_setup_receive_ring().  On a
 *   ring failure, frees the buffers of the rings that had already
 *   completed (the failing ring cleans up after itself).
 * NOTE(review): missing from this extract: the 'fail:' label, the
 * declarations of i/j/s, and the per-buffer selection of 's'.  Also
 * note the unwind loop 'for (--i; i > 0; i--, rxr++)' skips ring 0 --
 * presumably intentional per the comment, but verify against the
 * original source.
 */
3038 ixgbe_setup_receive_structures(struct adapter *adapter)
3040 struct rx_ring *rxr = adapter->rx_rings;
3043 for (i = 0; i < adapter->num_rx_queues; i++, rxr++)
3044 if (ixgbe_setup_receive_ring(rxr))
3050 * Free RX buffers allocated so far, we will only handle
3051 * the rings that completed, the failing case will have
3052 * cleaned up for itself. The value of 'i' will be the
3053 * failed ring so we must pre-decrement it.
3055 rxr = adapter->rx_rings;
3056 for (--i; i > 0; i--, rxr++) {
3057 for (j = 0; j < adapter->num_rx_desc; j++) {
3058 struct ixgbe_rx_buf *rxbuf;
3059 rxbuf = &rxr->rx_buffers[j];
3061 if (rxbuf->m_head != NULL) {
3062 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
3063 BUS_DMASYNC_POSTREAD);
3064 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
3065 m_freem(rxbuf->m_head);
3066 rxbuf->m_head = NULL;
3074 /*********************************************************************
3076 * Enable receive unit.
3078 **********************************************************************/
/*
 * ixgbe_initialize_receive_units:
 *   Program the hardware RX path: disable receives while configuring,
 *   enable broadcast acceptance, set jumbo-frame mode from the MTU,
 *   configure SRRCTL buffer sizing and advanced one-buffer descriptor
 *   format, set interrupt moderation (EITR) per queue and for the link
 *   vector, program each ring's base/length/head/tail registers, set
 *   up RSS (redirection table, hash seeds, MRQC) when multiple RX
 *   queues are in use, configure RXCSUM, and finally re-enable the
 *   receive engine.
 * NOTE(review): missing from this extract: declarations of i/j/reta/
 * random, the RETA fill loop body, and several closing braces.
 */
3080 ixgbe_initialize_receive_units(struct adapter *adapter)
3082 struct rx_ring *rxr = adapter->rx_rings;
3083 struct ifnet *ifp = adapter->ifp;
3084 u32 rxctrl, fctrl, srrctl, rxcsum;
3085 u32 mrqc, hlreg, linkvec;
3095 * Make sure receives are disabled while
3096 * setting up the descriptor ring
3098 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
3099 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
3100 rxctrl & ~IXGBE_RXCTRL_RXEN);
3102 /* Enable broadcasts */
3103 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3104 fctrl |= IXGBE_FCTRL_BAM;
3105 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3107 hlreg = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
3108 if (ifp->if_mtu > ETHERMTU)
3109 hlreg |= IXGBE_HLREG0_JUMBOEN;
3111 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3112 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, hlreg);
3114 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
3115 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3116 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3117 if (adapter->bigbufs)
3118 srrctl |= 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3120 srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3121 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3122 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
3124 /* Set Queue moderation rate */
3125 for (i = 0; i < IXGBE_MSGS; i++)
3126 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(i), DEFAULT_ITR);
3128 /* Set Link moderation lower */
3129 linkvec = adapter->num_tx_queues + adapter->num_rx_queues;
3130 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(linkvec), LINK_ITR);
3132 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3133 u64 rdba = rxr->rxdma.dma_paddr;
3134 /* Setup the Base and Length of the Rx Descriptor Ring */
3135 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(i),
3136 (rdba & 0x00000000ffffffffULL));
3137 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(i), (rdba >> 32));
3138 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(i),
3139 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3141 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3142 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(i), 0);
3143 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(i),
3144 adapter->num_rx_desc - 1);
3147 rxcsum = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCSUM);
3149 if (adapter->num_rx_queues > 1) {
3150 /* set up random bits */
3151 arc4rand(&random, sizeof(random), 0);
3153 /* Create reta data */
3154 for (i = 0; i < 128; )
3155 for (j = 0; j < adapter->num_rx_queues &&
3159 /* Set up the redirection table */
3160 for (i = 0; i < 32; i++)
3161 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RETA(i), reta.i[i]);
3163 /* Now fill our hash function seeds */
3164 for (int i = 0; i < 10; i++)
3165 IXGBE_WRITE_REG_ARRAY(&adapter->hw,
3166 IXGBE_RSSRK(0), i, random[i]);
3168 mrqc = IXGBE_MRQC_RSSEN
3169 /* Perform hash on these packet types */
3170 | IXGBE_MRQC_RSS_FIELD_IPV4
3171 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3172 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
3173 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3174 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3175 | IXGBE_MRQC_RSS_FIELD_IPV6
3176 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3177 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
3178 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3179 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MRQC, mrqc);
3181 /* RSS and RX IPP Checksum are mutually exclusive */
3182 rxcsum |= IXGBE_RXCSUM_PCSD;
3185 if (ifp->if_capenable & IFCAP_RXCSUM)
3186 rxcsum |= IXGBE_RXCSUM_PCSD;
3188 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3189 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3191 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCSUM, rxcsum);
3193 /* Enable Receive engine */
3194 rxctrl |= (IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS);
3195 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rxctrl);
3200 /*********************************************************************
3202 * Free all receive rings.
3204 **********************************************************************/
/*
 * ixgbe_free_receive_structures:
 *   Tear down all RX rings: free each ring's buffers, release its LRO
 *   state (the tcp_lro_free() call appears to be on a dropped line of
 *   this extract), free the descriptor DMA area, then free the
 *   rx_rings array itself.
 */
3206 ixgbe_free_receive_structures(struct adapter *adapter)
3208 struct rx_ring *rxr = adapter->rx_rings;
3210 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3211 struct lro_ctrl *lro = &rxr->lro;
3212 ixgbe_free_receive_buffers(rxr);
3213 /* Free LRO memory */
3215 /* Free the ring memory as well */
3216 ixgbe_dma_free(adapter, &rxr->rxdma);
3219 free(adapter->rx_rings, M_DEVBUF);
3222 /*********************************************************************
3224 * Free receive ring data structures
3226 **********************************************************************/
/*
 * ixgbe_free_receive_buffers:
 *   Release per-descriptor RX resources for one ring: unload and
 *   destroy the DMA map matching the size ('bigbuf') that was last
 *   loaded, free any attached mbuf, then free the rx_buffers array and
 *   both DMA tags.  Tolerates a partially constructed ring.
 * NOTE(review): the loop's 'rxbuf++' advance and several braces are
 * missing from this extract.
 */
3228 ixgbe_free_receive_buffers(struct rx_ring *rxr)
3230 struct adapter *adapter = NULL;
3231 struct ixgbe_rx_buf *rxbuf = NULL;
3233 INIT_DEBUGOUT("free_receive_buffers: begin");
3234 adapter = rxr->adapter;
3235 if (rxr->rx_buffers != NULL) {
3236 rxbuf = &rxr->rx_buffers[0];
3237 for (int i = 0; i < adapter->num_rx_desc; i++) {
3238 int s = rxbuf->bigbuf;
3239 if (rxbuf->map != NULL) {
3240 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
3241 bus_dmamap_destroy(rxr->rxtag[s], rxbuf->map[s]);
3243 if (rxbuf->m_head != NULL) {
3244 m_freem(rxbuf->m_head);
3246 rxbuf->m_head = NULL;
3250 if (rxr->rx_buffers != NULL) {
3251 free(rxr->rx_buffers, M_DEVBUF);
3252 rxr->rx_buffers = NULL;
3254 for (int s = 0; s < 2; s++) {
3255 if (rxr->rxtag[s] != NULL) {
3256 bus_dma_tag_destroy(rxr->rxtag[s]);
3257 rxr->rxtag[s] = NULL;
3263 /*********************************************************************
3265 * This routine executes in interrupt context. It replenishes
3266 * the mbufs in the descriptor and sends data which has been
3267 * dma'ed into host memory to upper layer.
3269 * We loop at most count times if count is > 0, or until done if
3272 *********************************************************************/
/*
 * ixgbe_rxeof:
 *   RX completion processing, called in interrupt context.  Walks
 *   descriptors from next_to_check while the DD bit is set (bounded by
 *   'count' when positive): replenishes each consumed slot via
 *   ixgbe_get_buf(), assembles multi-descriptor frames with fmp/lmp,
 *   attaches checksum-offload results and VLAN tags, and hands
 *   completed packets to LRO or directly to if_input (with the RX lock
 *   dropped around the stack call).  Finally updates the ring tail
 *   register and flushes any queued LRO work lock-free.
 * NOTE(review): missing from this extract: the IXGBE_RX_LOCK
 *   acquisition, declarations of mp/s/staterr, the accept_frame
 *   dispositions, the discard path, and several braces.  Return
 *   value semantics (more-work indication via the final DD test at
 *   the bottom) should be confirmed against the original source.
 */
3274 ixgbe_rxeof(struct rx_ring *rxr, int count)
3276 struct adapter *adapter = rxr->adapter;
3277 struct ifnet *ifp = adapter->ifp;
3278 struct lro_ctrl *lro = &rxr->lro;
3279 struct lro_entry *queued;
3281 int len, i, eop = 0;
3282 u8 accept_frame = 0;
3284 union ixgbe_adv_rx_desc *cur;
3288 i = rxr->next_to_check;
3289 cur = &rxr->rx_base[i];
3290 staterr = cur->wb.upper.status_error;
3292 if (!(staterr & IXGBE_RXD_STAT_DD)) {
3293 IXGBE_RX_UNLOCK(rxr);
3297 while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
3298 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3299 struct mbuf *m = NULL;
3302 mp = rxr->rx_buffers[i].m_head;
3303 s = rxr->rx_buffers[i].bigbuf;
3304 bus_dmamap_sync(rxr->rxtag[s], rxr->rx_buffers[i].map[s],
3305 BUS_DMASYNC_POSTREAD);
3307 if (staterr & IXGBE_RXD_STAT_EOP) {
3313 len = cur->wb.upper.length;
3315 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
3319 /* Get a fresh buffer first */
3320 if (ixgbe_get_buf(rxr, i) != 0) {
3325 /* Assign correct length to the current fragment */
3328 if (rxr->fmp == NULL) {
3329 mp->m_pkthdr.len = len;
3330 rxr->fmp = mp; /* Store the first mbuf */
3333 /* Chain mbuf's together */
3334 mp->m_flags &= ~M_PKTHDR;
3335 rxr->lmp->m_next = mp;
3336 rxr->lmp = rxr->lmp->m_next;
3337 rxr->fmp->m_pkthdr.len += len;
3341 rxr->fmp->m_pkthdr.rcvif = ifp;
3343 rxr->packet_count++;
3344 rxr->byte_count += rxr->fmp->m_pkthdr.len;
3346 ixgbe_rx_checksum(adapter,
3349 if (staterr & IXGBE_RXD_STAT_VP) {
3350 #if __FreeBSD_version < 700000
3351 VLAN_INPUT_TAG_NEW(ifp, rxr->fmp,
3352 (le16toh(cur->wb.upper.vlan) &
3353 IXGBE_RX_DESC_SPECIAL_VLAN_MASK));
3355 rxr->fmp->m_pkthdr.ether_vtag =
3356 le16toh(cur->wb.upper.vlan);
3357 rxr->fmp->m_flags |= M_VLANTAG;
3367 /* Reuse loaded DMA map and just update mbuf chain */
3368 mp = rxr->rx_buffers[i].m_head;
3369 mp->m_len = mp->m_pkthdr.len =
3370 (rxr->rx_buffers[i].bigbuf ? MJUMPAGESIZE:MCLBYTES);
3371 mp->m_data = mp->m_ext.ext_buf;
3373 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3374 m_adj(mp, ETHER_ALIGN);
3375 if (rxr->fmp != NULL) {
3383 /* Zero out the receive descriptors status */
3384 cur->wb.upper.status_error = 0;
3385 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3386 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3388 rxr->last_cleaned = i; /* for updating tail */
3390 if (++i == adapter->num_rx_desc)
3393 /* Now send up to the stack */
3395 rxr->next_to_check = i;
3396 /* Use LRO if possible */
3397 if ((!lro->lro_cnt) || (tcp_lro_rx(lro, m, 0))) {
3398 IXGBE_RX_UNLOCK(rxr);
3399 (*ifp->if_input)(ifp, m);
3401 i = rxr->next_to_check;
3404 /* Get next descriptor */
3405 cur = &rxr->rx_base[i];
3406 staterr = cur->wb.upper.status_error;
3408 rxr->next_to_check = i;
3410 /* Advance the IXGB's Receive Queue "Tail Pointer" */
3411 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned);
3412 IXGBE_RX_UNLOCK(rxr);
3415 ** Flush any outstanding LRO work
3416 ** this may call into the stack and
3417 ** must not hold a driver lock.
3419 while(!SLIST_EMPTY(&lro->lro_active)) {
3420 queued = SLIST_FIRST(&lro->lro_active);
3421 SLIST_REMOVE_HEAD(&lro->lro_active, next);
3422 tcp_lro_flush(lro, queued);
3425 if (!(staterr & IXGBE_RXD_STAT_DD))
3431 /*********************************************************************
3433 * Verify that the hardware indicated that the checksum is valid.
3434 * Inform the stack about the status of checksum so that stack
3435 * doesn't spend time verifying the checksum.
3437 *********************************************************************/
/*
 * ixgbe_rx_checksum:
 *   Translate the hardware's RX checksum status/error bits ('staterr',
 *   status in the low byte, errors in bits 24-31) into mbuf
 *   csum_flags.  Does nothing when RXCSUM is disabled on the
 *   interface.  A good IP checksum sets CSUM_IP_CHECKED|CSUM_IP_VALID;
 *   a good L4 (TCP/UDP) checksum sets CSUM_DATA_VALID|CSUM_PSEUDO_HDR
 *   with csum_data = 0xffff so the stack skips verification.
 */
3439 ixgbe_rx_checksum(struct adapter *adapter,
3440 u32 staterr, struct mbuf * mp)
3442 struct ifnet *ifp = adapter->ifp;
3443 u16 status = (u16) staterr;
3444 u8 errors = (u8) (staterr >> 24);
3446 /* Not offloading */
3447 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
3448 mp->m_pkthdr.csum_flags = 0;
3452 if (status & IXGBE_RXD_STAT_IPCS) {
3454 if (!(errors & IXGBE_RXD_ERR_IPE)) {
3455 /* IP Checksum Good */
3456 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3457 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3460 mp->m_pkthdr.csum_flags = 0;
3462 if (status & IXGBE_RXD_STAT_L4CS) {
3464 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3465 mp->m_pkthdr.csum_flags |=
3466 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3467 mp->m_pkthdr.csum_data = htons(0xffff);
3473 #ifdef IXGBE_VLAN_EVENTS
3475 * This routine is run via a vlan
/*
 * ixgbe_register_vlan:
 *   VLAN-config event handler.  Enables hardware VLAN tag stripping
 *   (VME) and filtering (VFE), clears CFI filtering, and installs the
 *   new tag in the hardware VLAN filter table (VFTA).
 * NOTE(review): the declaration of 'ctrl' is missing from this extract.
 */
3479 ixgbe_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
3481 struct adapter *adapter = ifp->if_softc;
3484 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
3485 ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
3486 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3487 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
3489 /* Make entry in the hardware filter table */
3490 ixgbe_set_vfta(&adapter->hw, vtag, 0, TRUE);
3494 * This routine is run via a vlan
/*
 * ixgbe_unregister_vlan:
 *   VLAN-unconfig event handler.  Removes the tag from the hardware
 *   VLAN filter table; when the last VLAN is gone (if_vlantrunk ==
 *   NULL) it disables hardware VLAN stripping and filtering entirely.
 * NOTE(review): the declaration of 'ctrl' is missing from this extract.
 */
3498 ixgbe_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
3500 struct adapter *adapter = ifp->if_softc;
3502 /* Remove entry in the hardware filter table */
3503 ixgbe_set_vfta(&adapter->hw, vtag, 0, FALSE);
3505 /* Have all vlans unregistered? */
3506 if (adapter->ifp->if_vlantrunk == NULL) {
3508 /* Turn off the filter table */
3509 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
3510 ctrl &= ~IXGBE_VLNCTRL_VME;
3511 ctrl &= ~IXGBE_VLNCTRL_VFE;
3512 ctrl |= IXGBE_VLNCTRL_CFIEN;
3513 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
3516 #endif /* IXGBE_VLAN_EVENTS */
/*
 * ixgbe_enable_intr:
 *   Unmask device interrupts.  Adds fan-failure detection (GPI SDP1)
 *   on copper PHYs; under MSI-X, enables auto-clear for the queue
 *   vectors while keeping Link/Other causes manually cleared, then
 *   writes EIMS and flushes.
 */
3519 ixgbe_enable_intr(struct adapter *adapter)
3521 struct ixgbe_hw *hw = &adapter->hw;
3522 u32 mask = IXGBE_EIMS_ENABLE_MASK;
3524 /* Enable Fan Failure detection */
3525 if (hw->phy.media_type == ixgbe_media_type_copper)
3526 mask |= IXGBE_EIMS_GPI_SDP1;
3527 /* With RSS we use auto clear */
3528 if (adapter->msix_mem) {
3529 /* Dont autoclear Link */
3530 mask &= ~IXGBE_EIMS_OTHER;
3531 mask &= ~IXGBE_EIMS_LSC;
3532 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
3533 adapter->eims_mask | mask);
3536 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3537 IXGBE_WRITE_FLUSH(hw);
/*
 * ixgbe_disable_intr:
 *   Mask all device interrupts: clear EIAC auto-clear under MSI-X,
 *   write all-ones to EIMC, and flush the write.
 */
3543 ixgbe_disable_intr(struct adapter *adapter)
3545 if (adapter->msix_mem)
3546 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3547 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3548 IXGBE_WRITE_FLUSH(&adapter->hw);
/*
 * ixgbe_read_pci_cfg:
 *   Shared-code callback -- read a PCI config-space register via the
 *   FreeBSD pci_read_config() using the device handle stashed in
 *   hw->back (struct ixgbe_osdep).
 * NOTE(review): the remaining pci_read_config() arguments, the
 * declaration of 'value' and the return statement are missing from
 * this extract.
 */
3553 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
3557 value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
/*
 * ixgbe_set_ivar:
 *   Map an interrupt cause ('entry') to an MSI-X vector.  Each 32-bit
 *   IVAR register holds four 8-bit allocations; this computes the
 *   register index (entry / 4) and byte lane (entry % 4), sets the
 *   ALLOC_VAL bit in the vector, and does a read-modify-write.
 * NOTE(review): the declarations of 'ivar' and 'index' are missing
 * from this extract.
 */
3564 ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector)
3568 vector |= IXGBE_IVAR_ALLOC_VAL;
3569 index = (entry >> 2) & 0x1F;
3570 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
3571 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3572 ivar |= (vector << (8 * (entry & 0x3)));
3573 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
/*
 * ixgbe_configure_ivars:
 *   Program the IVAR table: bind each RX and TX queue to its assigned
 *   MSI-X vector and the link/other causes to their vector,
 *   accumulating each source's EIMS bits into adapter->eims_mask.
 * NOTE(review): the vector argument of the final "other causes"
 * ixgbe_set_ivar() call is on a dropped line of this extract.
 */
3577 ixgbe_configure_ivars(struct adapter *adapter)
3579 struct tx_ring *txr = adapter->tx_rings;
3580 struct rx_ring *rxr = adapter->rx_rings;
3582 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3583 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), rxr->msix);
3584 adapter->eims_mask |= rxr->eims;
3587 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
3588 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), txr->msix);
3589 adapter->eims_mask |= txr->eims;
3592 /* For the Link interrupt */
3593 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX,
3595 adapter->eims_mask |= IXGBE_IVAR_OTHER_CAUSES_INDEX;
3598 /**********************************************************************
3600 * Update the board statistics counters.
3602 **********************************************************************/
/*
 * ixgbe_update_stats_counters:
 *   Harvest the hardware statistics registers into adapter->stats,
 *   applying the documented silicon workarounds (gprc includes missed
 *   packets; mprc incorrectly counts broadcasts; flow-control pause
 *   frames are counted as good TX packets), then mirror the totals
 *   into the ifnet statistics structure.
 * FIX: removed stray second semicolon after 'adapter->ifp' (an empty
 *   declaration in the middle of the declaration block -- invalid in
 *   C89 and noise in any dialect).
 * NOTE(review): the declaration of 'mp' and the 'missed_rx += mp;'
 *   accumulation inside the per-queue loop appear to be on dropped
 *   lines of this extract -- verify against the original source, as
 *   missed_rx is consumed below for gprc and if_ierrors.
 */
3604 ixgbe_update_stats_counters(struct adapter *adapter)
3606 struct ifnet *ifp = adapter->ifp;
3607 struct ixgbe_hw *hw = &adapter->hw;
3608 u32 missed_rx = 0, bprc, lxon, lxoff, total;
3610 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3612 for (int i = 0; i < 8; i++) {
3614 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3616 adapter->stats.mpc[i] += mp;
3617 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3620 /* Hardware workaround, gprc counts missed packets */
3621 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3622 adapter->stats.gprc -= missed_rx;
3624 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3625 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3626 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3629 * Workaround: mprc hardware is incorrectly counting
3630 * broadcasts, so for now we subtract those.
3632 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3633 adapter->stats.bprc += bprc;
3634 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3635 adapter->stats.mprc -= bprc;
3637 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3638 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3639 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3640 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3641 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3642 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3643 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3644 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3646 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3647 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3649 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3650 adapter->stats.lxontxc += lxon;
3651 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3652 adapter->stats.lxofftxc += lxoff;
3653 total = lxon + lxoff;
/* Pause frames are counted by the hardware as 64-byte good TX
 * packets; back them out of the good-packet/octet counters. */
3655 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3656 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3657 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3658 adapter->stats.gptc -= total;
3659 adapter->stats.mptc -= total;
3660 adapter->stats.ptc64 -= total;
3661 adapter->stats.gotc -= total * ETHER_MIN_LEN;
3663 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3664 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3665 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3666 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3667 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3668 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3669 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3670 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3671 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3672 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3675 /* Fill out the OS statistics structure */
3676 ifp->if_ipackets = adapter->stats.gprc;
3677 ifp->if_opackets = adapter->stats.gptc;
3678 ifp->if_ibytes = adapter->stats.gorc;
3679 ifp->if_obytes = adapter->stats.gotc;
3680 ifp->if_imcasts = adapter->stats.mprc;
3681 ifp->if_collisions = 0;
3684 ifp->if_ierrors = missed_rx + adapter->stats.crcerrs +
3685 adapter->stats.rlec;
3689 /**********************************************************************
3691 * This routine is called only when ixgbe_display_debug_stats is enabled.
3692 * This routine provides a way to take a look at important statistics
3693 * maintained by the driver and hardware.
3695 **********************************************************************/
3697 ixgbe_print_hw_stats(struct adapter * adapter)
3699 device_t dev = adapter->dev;
3702 device_printf(dev,"Std Mbuf Failed = %lu\n",
3703 adapter->mbuf_alloc_failed);
3704 device_printf(dev,"Std Cluster Failed = %lu\n",
3705 adapter->mbuf_cluster_failed);
3707 device_printf(dev,"Missed Packets = %llu\n",
3708 (long long)adapter->stats.mpc[0]);
3709 device_printf(dev,"Receive length errors = %llu\n",
3710 ((long long)adapter->stats.roc +
3711 (long long)adapter->stats.ruc));
3712 device_printf(dev,"Crc errors = %llu\n",
3713 (long long)adapter->stats.crcerrs);
3714 device_printf(dev,"Driver dropped packets = %lu\n",
3715 adapter->dropped_pkts);
3716 device_printf(dev, "watchdog timeouts = %ld\n",
3717 adapter->watchdog_events);
3719 device_printf(dev,"XON Rcvd = %llu\n",
3720 (long long)adapter->stats.lxonrxc);
3721 device_printf(dev,"XON Xmtd = %llu\n",
3722 (long long)adapter->stats.lxontxc);
3723 device_printf(dev,"XOFF Rcvd = %llu\n",
3724 (long long)adapter->stats.lxoffrxc);
3725 device_printf(dev,"XOFF Xmtd = %llu\n",
3726 (long long)adapter->stats.lxofftxc);
3728 device_printf(dev,"Total Packets Rcvd = %llu\n",
3729 (long long)adapter->stats.tpr);
3730 device_printf(dev,"Good Packets Rcvd = %llu\n",
3731 (long long)adapter->stats.gprc);
3732 device_printf(dev,"Good Packets Xmtd = %llu\n",
3733 (long long)adapter->stats.gptc);
3734 device_printf(dev,"TSO Transmissions = %lu\n",
3740 /**********************************************************************
3742 * This routine is called only when em_display_debug_stats is enabled.
3743 * This routine provides a way to take a look at important statistics
3744 * maintained by the driver and hardware.
3746 **********************************************************************/
3748 ixgbe_print_debug_info(struct adapter *adapter)
3750 device_t dev = adapter->dev;
3751 struct rx_ring *rxr = adapter->rx_rings;
3752 struct tx_ring *txr = adapter->tx_rings;
3753 struct ixgbe_hw *hw = &adapter->hw;
3755 device_printf(dev,"Error Byte Count = %u \n",
3756 IXGBE_READ_REG(hw, IXGBE_ERRBC));
3758 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3759 struct lro_ctrl *lro = &rxr->lro;
3760 device_printf(dev,"Queue[%d]: rdh = %d, hw rdt = %d\n",
3761 i, IXGBE_READ_REG(hw, IXGBE_RDH(i)),
3762 IXGBE_READ_REG(hw, IXGBE_RDT(i)));
3763 device_printf(dev,"RX(%d) Packets Received: %lu\n",
3764 rxr->me, (long)rxr->packet_count);
3765 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
3766 rxr->me, (long)rxr->byte_count);
3767 device_printf(dev,"RX(%d) IRQ Handled: %lu\n",
3768 rxr->me, (long)rxr->rx_irq);
3769 device_printf(dev,"RX(%d) LRO Queued= %d\n",
3770 rxr->me, lro->lro_queued);
3771 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
3772 rxr->me, lro->lro_flushed);
3775 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
3776 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
3777 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
3778 IXGBE_READ_REG(hw, IXGBE_TDT(i)));
3779 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
3780 txr->me, (long)txr->tx_packets);
3781 device_printf(dev,"TX(%d) IRQ Handled: %lu\n",
3782 txr->me, (long)txr->tx_irq);
3783 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
3784 txr->me, (long)txr->no_tx_desc_avail);
3787 device_printf(dev,"Link IRQ Handled: %lu\n",
3788 (long)adapter->link_irq);
3793 ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS)
3797 struct adapter *adapter;
3800 error = sysctl_handle_int(oidp, &result, 0, req);
3802 if (error || !req->newptr)
3806 adapter = (struct adapter *) arg1;
3807 ixgbe_print_hw_stats(adapter);
3813 ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS)
3816 struct adapter *adapter;
3819 error = sysctl_handle_int(oidp, &result, 0, req);
3821 if (error || !req->newptr)
3825 adapter = (struct adapter *) arg1;
3826 ixgbe_print_debug_info(adapter);
3832 ** Set flow control using sysctl:
3833 ** Flow control values:
3840 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
3843 struct adapter *adapter;
3845 error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);
3850 adapter = (struct adapter *) arg1;
3851 switch (ixgbe_flow_control) {
3852 case ixgbe_fc_rx_pause:
3853 case ixgbe_fc_tx_pause:
3855 adapter->hw.fc.type = ixgbe_flow_control;
3859 adapter->hw.fc.type = ixgbe_fc_none;
3862 ixgbe_setup_fc(&adapter->hw, 0);
3867 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
3868 const char *description, int *limit, int value)
3871 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3872 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3873 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
3876 #ifndef NO_82598_A0_SUPPORT
3878 * A0 Workaround: invert descriptor for hardware
3881 desc_flip(void *desc)
3883 struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
3886 d = (struct dhack *)desc;
3891 d->b2 &= 0xFFFFFFF0;
3892 d->b1 &= ~IXGBE_ADVTXD_DCMD_RS;