1 /******************************************************************************
3 Copyright (c) 2001-2009, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
41 /*********************************************************************
42 * Set this to one to display debug statistics
43 *********************************************************************/
44 int ixgbe_display_debug_stats = 0;
46 /*********************************************************************
48 *********************************************************************/
49 char ixgbe_driver_version[] = "1.7.4";
51 /*********************************************************************
54 * Used by probe to select devices to load on
55 * Last field stores an index into ixgbe_strings
56 * Last entry must be all 0s
58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
59 *********************************************************************/
61 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
76 /* required last entry */
80 /*********************************************************************
81 * Table of branding strings
82 *********************************************************************/
84 static char *ixgbe_strings[] = {
85 "Intel(R) PRO/10GbE PCI-Express Network Driver"
88 /*********************************************************************
90 *********************************************************************/
91 static int ixgbe_probe(device_t);
92 static int ixgbe_attach(device_t);
93 static int ixgbe_detach(device_t);
94 static int ixgbe_shutdown(device_t);
95 static void ixgbe_start(struct ifnet *);
96 static void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
97 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
98 static void ixgbe_watchdog(struct adapter *);
99 static void ixgbe_init(void *);
100 static void ixgbe_init_locked(struct adapter *);
101 static void ixgbe_stop(void *);
102 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
103 static int ixgbe_media_change(struct ifnet *);
104 static void ixgbe_identify_hardware(struct adapter *);
105 static int ixgbe_allocate_pci_resources(struct adapter *);
106 static int ixgbe_allocate_msix(struct adapter *);
107 static int ixgbe_allocate_legacy(struct adapter *);
108 static int ixgbe_allocate_queues(struct adapter *);
109 #if __FreeBSD_version >= 602105
110 static int ixgbe_setup_msix(struct adapter *);
112 static void ixgbe_free_pci_resources(struct adapter *);
113 static void ixgbe_local_timer(void *);
114 static int ixgbe_hardware_init(struct adapter *);
115 static void ixgbe_setup_interface(device_t, struct adapter *);
117 static int ixgbe_allocate_transmit_buffers(struct tx_ring *);
118 static int ixgbe_setup_transmit_structures(struct adapter *);
119 static void ixgbe_setup_transmit_ring(struct tx_ring *);
120 static void ixgbe_initialize_transmit_units(struct adapter *);
121 static void ixgbe_free_transmit_structures(struct adapter *);
122 static void ixgbe_free_transmit_buffers(struct tx_ring *);
124 static int ixgbe_allocate_receive_buffers(struct rx_ring *);
125 static int ixgbe_setup_receive_structures(struct adapter *);
126 static int ixgbe_setup_receive_ring(struct rx_ring *);
127 static void ixgbe_initialize_receive_units(struct adapter *);
128 static void ixgbe_free_receive_structures(struct adapter *);
129 static void ixgbe_free_receive_buffers(struct rx_ring *);
131 static void ixgbe_init_moderation(struct adapter *);
132 static void ixgbe_enable_intr(struct adapter *);
133 static void ixgbe_disable_intr(struct adapter *);
134 static void ixgbe_update_stats_counters(struct adapter *);
135 static bool ixgbe_txeof(struct tx_ring *);
136 static bool ixgbe_rxeof(struct rx_ring *, int);
137 static void ixgbe_rx_checksum(u32, struct mbuf *);
138 static void ixgbe_set_promisc(struct adapter *);
139 static void ixgbe_disable_promisc(struct adapter *);
140 static void ixgbe_set_multi(struct adapter *);
141 static void ixgbe_print_hw_stats(struct adapter *);
142 static void ixgbe_print_debug_info(struct adapter *);
143 static void ixgbe_update_link_status(struct adapter *);
144 static int ixgbe_get_buf(struct rx_ring *, int, u8);
145 static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
146 static int ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS);
147 static int ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS);
148 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
149 static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
150 struct ixgbe_dma_alloc *, int);
151 static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
152 static void ixgbe_add_rx_process_limit(struct adapter *, const char *,
153 const char *, int *, int);
154 static int ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
155 static boolean_t ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
156 static void ixgbe_set_ivar(struct adapter *, u16, u8, s8);
157 static void ixgbe_configure_ivars(struct adapter *);
158 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
160 #ifdef IXGBE_HW_VLAN_SUPPORT
161 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
162 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
165 #ifdef IXGBE_TIMESYNC
166 /* Precision Time sync support */
167 static int ixgbe_tsync_init(struct adapter *);
168 static void ixgbe_tsync_disable(struct adapter *);
171 static void ixgbe_update_aim(struct rx_ring *);
173 /* Support for pluggable optic modules */
174 static bool ixgbe_sfp_probe(struct adapter *);
176 /* Legacy (single vector interrupt handler */
177 static void ixgbe_legacy_irq(void *);
179 #if __FreeBSD_version >= 602105
180 /* The MSI/X Interrupt handlers */
181 static void ixgbe_msix_tx(void *);
182 static void ixgbe_msix_rx(void *);
183 static void ixgbe_msix_link(void *);
186 /* Deferred interrupt tasklets */
187 static void ixgbe_handle_tx(void *, int);
188 static void ixgbe_handle_rx(void *, int);
189 static void ixgbe_handle_link(void *, int);
190 static void ixgbe_handle_msf(void *, int);
191 static void ixgbe_handle_mod(void *, int);
194 /*********************************************************************
195 * FreeBSD Device Interface Entry Points
196 *********************************************************************/
198 static device_method_t ixgbe_methods[] = {
199 /* Device interface */
200 DEVMETHOD(device_probe, ixgbe_probe),
201 DEVMETHOD(device_attach, ixgbe_attach),
202 DEVMETHOD(device_detach, ixgbe_detach),
203 DEVMETHOD(device_shutdown, ixgbe_shutdown),
207 static driver_t ixgbe_driver = {
208 "ix", ixgbe_methods, sizeof(struct adapter),
211 static devclass_t ixgbe_devclass;
212 DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
214 MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
215 MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
218 ** TUNEABLE PARAMETERS:
222 ** These parameters are used in Adaptive
223 ** Interrupt Moderation. The value is set
224 ** into EITR and controls the interrupt
225 ** frequency. They can be modified but
226 ** be careful in tuning them.
228 static int ixgbe_enable_aim = TRUE;
229 TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
230 static int ixgbe_low_latency = IXGBE_LOW_LATENCY;
231 TUNABLE_INT("hw.ixgbe.low_latency", &ixgbe_low_latency);
232 static int ixgbe_ave_latency = IXGBE_AVE_LATENCY;
233 TUNABLE_INT("hw.ixgbe.ave_latency", &ixgbe_ave_latency);
234 static int ixgbe_bulk_latency = IXGBE_BULK_LATENCY;
235 TUNABLE_INT("hw.ixgbe.bulk_latency", &ixgbe_bulk_latency);
237 /* How many packets rxeof tries to clean at a time */
238 static int ixgbe_rx_process_limit = 100;
239 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
241 /* Flow control setting, default to off */
242 static int ixgbe_flow_control = ixgbe_fc_full;
243 TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
246 * Should the driver do LRO on the RX end
247 * this can be toggled on the fly, but the
248 * interface must be reset (down/up) for it
251 static int ixgbe_enable_lro = 1;
252 TUNABLE_INT("hw.ixgbe.enable_lro", &ixgbe_enable_lro);
255 * MSIX should be the default for best performance,
256 * but this allows it to be forced off for testing.
258 #if __FreeBSD_version >= 602105
259 static int ixgbe_enable_msix = 1;
261 static int ixgbe_enable_msix = 0;
263 TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
266 * Enable RX Header Split
267 * WARNING: disable this if bridging or forwarding!!
269 static int ixgbe_rx_hdr_split = 1;
270 TUNABLE_INT("hw.ixgbe.rx_hdr_split", &ixgbe_rx_hdr_split);
273 * Number of TX/RX Queues, with 0 setting
274 * it autoconfigures to the number of cpus.
276 static int ixgbe_tx_queues = 1;
277 TUNABLE_INT("hw.ixgbe.tx_queues", &ixgbe_tx_queues);
278 static int ixgbe_rx_queues = 1;
279 TUNABLE_INT("hw.ixgbe.rx_queues", &ixgbe_rx_queues);
281 /* Number of TX descriptors per ring */
282 static int ixgbe_txd = DEFAULT_TXD;
283 TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
285 /* Number of RX descriptors per ring */
286 static int ixgbe_rxd = DEFAULT_RXD;
287 TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
289 /* Total number of Interfaces - need for config sanity check */
290 static int ixgbe_total_ports;
293 ** The number of scatter-gather segments
294 ** differs for 82598 and 82599, default to
297 static int ixgbe_num_segs = IXGBE_82598_SCATTER;
299 /*********************************************************************
300 * Device identification routine
302 * ixgbe_probe determines if the driver should be loaded on
303 * adapter based on PCI vendor/device id of the adapter.
305 * return 0 on success, positive on failure
306 *********************************************************************/
309 ixgbe_probe(device_t dev)
311 ixgbe_vendor_info_t *ent;
313 u16 pci_vendor_id = 0;
314 u16 pci_device_id = 0;
315 u16 pci_subvendor_id = 0;
316 u16 pci_subdevice_id = 0;
317 char adapter_name[256];
319 INIT_DEBUGOUT("ixgbe_probe: begin");
321 pci_vendor_id = pci_get_vendor(dev);
322 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
325 pci_device_id = pci_get_device(dev);
326 pci_subvendor_id = pci_get_subvendor(dev);
327 pci_subdevice_id = pci_get_subdevice(dev);
329 ent = ixgbe_vendor_info_array;
330 while (ent->vendor_id != 0) {
331 if ((pci_vendor_id == ent->vendor_id) &&
332 (pci_device_id == ent->device_id) &&
334 ((pci_subvendor_id == ent->subvendor_id) ||
335 (ent->subvendor_id == 0)) &&
337 ((pci_subdevice_id == ent->subdevice_id) ||
338 (ent->subdevice_id == 0))) {
339 sprintf(adapter_name, "%s, Version - %s",
340 ixgbe_strings[ent->index],
341 ixgbe_driver_version);
342 device_set_desc_copy(dev, adapter_name);
351 /*********************************************************************
352 * Device initialization routine
354 * The attach entry point is called when the driver is being loaded.
355 * This routine identifies the type of hardware, allocates all resources
356 * and initializes the hardware.
358 * return 0 on success, positive on failure
359 *********************************************************************/
362 ixgbe_attach(device_t dev)
364 struct adapter *adapter;
370 INIT_DEBUGOUT("ixgbe_attach: begin");
372 /* Allocate, clear, and link in our adapter structure */
373 adapter = device_get_softc(dev);
374 adapter->dev = adapter->osdep.dev = dev;
378 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
380 /* Keep track of optics */
381 pci_device_id = pci_get_device(dev);
382 switch (pci_device_id) {
383 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT :
384 case IXGBE_DEV_ID_82598EB_CX4 :
385 adapter->optics = IFM_10G_CX4;
387 case IXGBE_DEV_ID_82598AF_DUAL_PORT :
388 case IXGBE_DEV_ID_82598_DA_DUAL_PORT :
389 case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
390 case IXGBE_DEV_ID_82598AT :
391 adapter->optics = IFM_10G_SR;
393 case IXGBE_DEV_ID_82598EB_XF_LR :
394 adapter->optics = IFM_10G_LR;
396 case IXGBE_DEV_ID_82599_SFP :
397 adapter->optics = IFM_10G_SR;
398 ixgbe_num_segs = IXGBE_82599_SCATTER;
400 case IXGBE_DEV_ID_82599_KX4 :
401 adapter->optics = IFM_10G_CX4;
402 ixgbe_num_segs = IXGBE_82599_SCATTER;
408 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
409 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
410 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
411 adapter, 0, ixgbe_sysctl_stats, "I", "Statistics");
413 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
414 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
415 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
416 adapter, 0, ixgbe_sysctl_debug, "I", "Debug Info");
418 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
419 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
420 OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
421 adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
423 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
424 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
425 OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
426 &ixgbe_enable_lro, 1, "Large Receive Offload");
428 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
429 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
430 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
431 &ixgbe_enable_aim, 1, "Interrupt Moderation");
433 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
434 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
435 OID_AUTO, "low_latency", CTLTYPE_INT|CTLFLAG_RW,
436 &ixgbe_low_latency, 1, "Low Latency");
438 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
439 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
440 OID_AUTO, "ave_latency", CTLTYPE_INT|CTLFLAG_RW,
441 &ixgbe_ave_latency, 1, "Average Latency");
443 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
444 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
445 OID_AUTO, "bulk_latency", CTLTYPE_INT|CTLFLAG_RW,
446 &ixgbe_bulk_latency, 1, "Bulk Latency");
448 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
449 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
450 OID_AUTO, "hdr_split", CTLTYPE_INT|CTLFLAG_RW,
451 &ixgbe_rx_hdr_split, 1, "RX Header Split");
453 /* Set up the timer callout */
454 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
456 /* Determine hardware revision */
457 ixgbe_identify_hardware(adapter);
459 /* Do base PCI setup - map BAR0 */
460 if (ixgbe_allocate_pci_resources(adapter)) {
461 device_printf(dev, "Allocation of PCI resources failed\n");
466 /* Do descriptor calc and sanity checks */
467 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
468 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
469 device_printf(dev, "TXD config issue, using default!\n");
470 adapter->num_tx_desc = DEFAULT_TXD;
472 adapter->num_tx_desc = ixgbe_txd;
475 ** With many RX rings it is easy to exceed the
476 ** system mbuf allocation. Tuning nmbclusters
477 ** can alleviate this.
479 if ((adapter->num_rx_queues > 1) && (nmbclusters > 0 )){
481 /* Calculate the total RX mbuf needs */
482 s = (ixgbe_rxd * adapter->num_rx_queues) * ixgbe_total_ports;
483 if (s > nmbclusters) {
484 device_printf(dev, "RX Descriptors exceed "
485 "system mbuf max, using default instead!\n");
486 ixgbe_rxd = DEFAULT_RXD;
490 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
491 ixgbe_rxd < MIN_TXD || ixgbe_rxd > MAX_TXD) {
492 device_printf(dev, "RXD config issue, using default!\n");
493 adapter->num_rx_desc = DEFAULT_RXD;
495 adapter->num_rx_desc = ixgbe_rxd;
497 /* Allocate our TX/RX Queues */
498 if (ixgbe_allocate_queues(adapter)) {
503 /* Initialize the shared code */
504 error = ixgbe_init_shared_code(hw);
505 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
507 ** No optics in this port, set up
508 ** so the timer routine will probe
509 ** for later insertion.
511 adapter->sfp_probe = TRUE;
513 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
514 device_printf(dev,"Unsupported SFP+ module detected!\n");
518 device_printf(dev,"Unable to initialize the shared code\n");
523 /* Initialize the hardware */
524 if (ixgbe_hardware_init(adapter)) {
525 device_printf(dev,"Unable to initialize the hardware\n");
530 if ((adapter->msix > 1) && (ixgbe_enable_msix))
531 error = ixgbe_allocate_msix(adapter);
533 error = ixgbe_allocate_legacy(adapter);
537 /* Setup OS specific network interface */
538 ixgbe_setup_interface(dev, adapter);
540 /* Sysctl for limiting the amount of work done in the taskqueue */
541 ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
542 "max number of rx packets to process", &adapter->rx_process_limit,
543 ixgbe_rx_process_limit);
545 /* Initialize statistics */
546 ixgbe_update_stats_counters(adapter);
548 #ifdef IXGBE_HW_VLAN_SUPPORT
549 /* Register for VLAN events */
550 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
551 ixgbe_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
552 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
553 ixgbe_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
556 /* let hardware know driver is loaded */
557 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
558 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
559 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
561 INIT_DEBUGOUT("ixgbe_attach: end");
564 ixgbe_free_transmit_structures(adapter);
565 ixgbe_free_receive_structures(adapter);
567 ixgbe_free_pci_resources(adapter);
572 /*********************************************************************
573 * Device removal routine
575 * The detach entry point is called when the driver is being removed.
576 * This routine stops the adapter and deallocates all the resources
577 * that were allocated for driver operation.
579 * return 0 on success, positive on failure
580 *********************************************************************/
583 ixgbe_detach(device_t dev)
585 struct adapter *adapter = device_get_softc(dev);
586 struct tx_ring *txr = adapter->tx_rings;
587 struct rx_ring *rxr = adapter->rx_rings;
590 INIT_DEBUGOUT("ixgbe_detach: begin");
592 /* Make sure VLANS are not using driver */
593 #if __FreeBSD_version >= 700000
594 if (adapter->ifp->if_vlantrunk != NULL) {
596 if (adapter->ifp->if_nvlans != 0) {
598 device_printf(dev,"Vlan in use, detach first\n");
602 IXGBE_CORE_LOCK(adapter);
604 IXGBE_CORE_UNLOCK(adapter);
606 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
608 taskqueue_drain(txr->tq, &txr->tx_task);
609 taskqueue_free(txr->tq);
613 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
615 taskqueue_drain(rxr->tq, &rxr->rx_task);
616 taskqueue_free(rxr->tq);
620 /* Drain the Link queue */
622 taskqueue_drain(adapter->tq, &adapter->link_task);
623 taskqueue_drain(adapter->tq, &adapter->mod_task);
624 taskqueue_drain(adapter->tq, &adapter->msf_task);
625 taskqueue_free(adapter->tq);
628 /* let hardware know driver is unloading */
629 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
630 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
631 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
633 #ifdef IXGBE_HW_VLAN_SUPPORT
634 /* Unregister VLAN events */
635 if (adapter->vlan_attach != NULL)
636 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
637 if (adapter->vlan_detach != NULL)
638 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
641 ether_ifdetach(adapter->ifp);
642 callout_drain(&adapter->timer);
643 ixgbe_free_pci_resources(adapter);
644 bus_generic_detach(dev);
645 if_free(adapter->ifp);
647 ixgbe_free_transmit_structures(adapter);
648 ixgbe_free_receive_structures(adapter);
650 IXGBE_CORE_LOCK_DESTROY(adapter);
654 /*********************************************************************
656 * Shutdown entry point
658 **********************************************************************/
661 ixgbe_shutdown(device_t dev)
663 struct adapter *adapter = device_get_softc(dev);
664 IXGBE_CORE_LOCK(adapter);
666 IXGBE_CORE_UNLOCK(adapter);
671 /*********************************************************************
672 * Transmit entry point
674 * ixgbe_start is called by the stack to initiate a transmit.
675 * The driver will remain in this routine as long as there are
676 * packets to transmit and transmit resources are available.
677 * In case resources are not available stack is notified and
678 * the packet is requeued.
679 **********************************************************************/
682 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
685 struct adapter *adapter = txr->adapter;
687 IXGBE_TX_LOCK_ASSERT(txr);
689 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
692 if (!adapter->link_active)
695 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
697 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
701 * Force a cleanup if number of TX descriptors
702 * available is below the threshold. If it fails
703 * to get above, then abort transmit.
705 if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
707 /* Make sure things have improved */
708 if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
709 txr->no_tx_desc_avail++;
714 if (ixgbe_xmit(txr, &m_head)) {
717 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
718 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
721 /* Send a copy of the frame to the BPF listener */
722 ETHER_BPF_MTAP(ifp, m_head);
724 /* Set timeout in case hardware has problems transmitting */
725 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
733 ixgbe_start(struct ifnet *ifp)
735 struct adapter *adapter = ifp->if_softc;
736 struct tx_ring *txr = adapter->tx_rings;
740 ** This is really just here for testing
741 ** TX multiqueue, ultimately what is
742 ** needed is the flow support in the stack
743 ** and appropriate logic here to deal with
746 if (adapter->num_tx_queues > 1)
747 queue = (curcpu % adapter->num_tx_queues);
749 txr = &adapter->tx_rings[queue];
751 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
752 if (IXGBE_TX_TRYLOCK(txr) == 0)
754 ixgbe_start_locked(txr, ifp);
755 IXGBE_TX_UNLOCK(txr);
760 /*********************************************************************
763 * ixgbe_ioctl is called when the user wants to configure the
766 * return 0 on success, positive on failure
767 **********************************************************************/
770 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
773 struct ifreq *ifr = (struct ifreq *) data;
774 struct ifaddr *ifa = (struct ifaddr *) data;
775 struct adapter *adapter = ifp->if_softc;
779 IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
780 if (ifa->ifa_addr->sa_family == AF_INET) {
781 ifp->if_flags |= IFF_UP;
782 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
783 IXGBE_CORE_LOCK(adapter);
784 ixgbe_init_locked(adapter);
785 IXGBE_CORE_UNLOCK(adapter);
787 arp_ifinit(ifp, ifa);
789 ether_ioctl(ifp, command, data);
792 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
793 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
796 IXGBE_CORE_LOCK(adapter);
797 ifp->if_mtu = ifr->ifr_mtu;
798 adapter->max_frame_size =
799 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
800 ixgbe_init_locked(adapter);
801 IXGBE_CORE_UNLOCK(adapter);
805 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
806 IXGBE_CORE_LOCK(adapter);
807 if (ifp->if_flags & IFF_UP) {
808 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
809 if ((ifp->if_flags ^ adapter->if_flags) &
810 (IFF_PROMISC | IFF_ALLMULTI)) {
811 ixgbe_disable_promisc(adapter);
812 ixgbe_set_promisc(adapter);
815 ixgbe_init_locked(adapter);
817 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
819 adapter->if_flags = ifp->if_flags;
820 IXGBE_CORE_UNLOCK(adapter);
824 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
825 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
826 IXGBE_CORE_LOCK(adapter);
827 ixgbe_disable_intr(adapter);
828 ixgbe_set_multi(adapter);
829 ixgbe_enable_intr(adapter);
830 IXGBE_CORE_UNLOCK(adapter);
835 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
836 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
840 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
841 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
842 if (mask & IFCAP_HWCSUM)
843 ifp->if_capenable ^= IFCAP_HWCSUM;
844 if (mask & IFCAP_TSO4)
845 ifp->if_capenable ^= IFCAP_TSO4;
846 if (mask & IFCAP_VLAN_HWTAGGING)
847 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
848 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
850 #if __FreeBSD_version >= 700000
851 VLAN_CAPABILITIES(ifp);
855 #ifdef IXGBE_TIMESYNC
857 ** IOCTL support for Precision Time (IEEE 1588) Support
859 case IXGBE_TIMESYNC_READTS:
862 struct ixgbe_tsync_read *tdata;
864 tdata = (struct ixgbe_tsync_read *) ifr->ifr_data;
866 if (tdata->read_current_time) {
867 getnanotime(&tdata->system_time);
868 tdata->network_time = IXGBE_READ_REG(&adapter->hw,
870 tdata->network_time |=
871 (u64)IXGBE_READ_REG(&adapter->hw,
872 IXGBE_SYSTIMH ) << 32;
875 rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
876 tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
880 unsigned char *tmp_cp;
883 tdata->rx_stamp = IXGBE_READ_REG(&adapter->hw,
885 tdata->rx_stamp |= (u64)IXGBE_READ_REG(&adapter->hw,
886 IXGBE_RXSTMPH) << 32;
888 tmp = IXGBE_READ_REG(&adapter->hw, IXGBE_RXSATRL);
889 tmp_cp = (unsigned char *) &tmp;
890 tdata->srcid[0] = tmp_cp[0];
891 tdata->srcid[1] = tmp_cp[1];
892 tdata->srcid[2] = tmp_cp[2];
893 tdata->srcid[3] = tmp_cp[3];
894 tmp = IXGBE_READ_REG(&adapter->hw, IXGBE_RXSATRH);
895 tmp_cp = (unsigned char *) &tmp;
896 tdata->srcid[4] = tmp_cp[0];
897 tdata->srcid[5] = tmp_cp[1];
898 tdata->seqid = tmp >> 16;
899 tdata->seqid = htons(tdata->seqid);
905 tdata->tx_stamp = IXGBE_READ_REG(&adapter->hw,
907 tdata->tx_stamp |= (u64) IXGBE_READ_REG(&adapter->hw,
908 IXGBE_TXSTMPH) << 32;
914 #endif /* IXGBE_TIMESYNC */
917 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
918 error = ether_ioctl(ifp, command, data);
925 /*********************************************************************
926 * Watchdog entry point
928 * This routine is called by the local timer
929 * to detect hardware hangs .
931 **********************************************************************/
934 ixgbe_watchdog(struct adapter *adapter)
936 device_t dev = adapter->dev;
937 struct tx_ring *txr = adapter->tx_rings;
938 struct ixgbe_hw *hw = &adapter->hw;
939 bool tx_hang = FALSE;
941 IXGBE_CORE_LOCK_ASSERT(adapter);
944 * The timer is set to 5 every time ixgbe_start() queues a packet.
945 * Then ixgbe_txeof() keeps resetting to 5 as long as it cleans at
946 * least one descriptor.
947 * Finally, anytime all descriptors are clean the timer is
950 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
954 if (txr->watchdog_timer == 0 || --txr->watchdog_timer) {
955 IXGBE_TX_UNLOCK(txr);
958 head = IXGBE_READ_REG(hw, IXGBE_TDH(i));
959 tail = IXGBE_READ_REG(hw, IXGBE_TDT(i));
960 if (head == tail) { /* last minute check */
961 IXGBE_TX_UNLOCK(txr);
964 /* Well, seems something is really hung */
966 IXGBE_TX_UNLOCK(txr);
970 if (tx_hang == FALSE)
974 * If we are in this routine because of pause frames, then don't
975 * reset the hardware.
977 if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
978 txr = adapter->tx_rings; /* reset pointer */
979 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
981 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
982 IXGBE_TX_UNLOCK(txr);
988 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
989 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
990 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
991 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
992 IXGBE_READ_REG(hw, IXGBE_TDT(i)));
993 device_printf(dev,"TX(%d) desc avail = %d,"
994 "Next TX to Clean = %d\n",
995 i, txr->tx_avail, txr->next_tx_to_clean);
997 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
998 adapter->watchdog_events++;
1000 ixgbe_init_locked(adapter);
1003 /*********************************************************************
1006 * This routine is used in two ways. It is used by the stack as
1007 * init entry point in network interface structure. It is also used
1008 * by the driver as a hw/sw initialization routine to get to a
1011 * return 0 on success, positive on failure
1012 **********************************************************************/
1013 #define IXGBE_MHADD_MFS_SHIFT 16
1016 ixgbe_init_locked(struct adapter *adapter)
1018 struct ifnet *ifp = adapter->ifp;
1019 device_t dev = adapter->dev;
1020 struct ixgbe_hw *hw;
1021 u32 k, txdctl, mhadd, gpie;
1025 INIT_DEBUGOUT("ixgbe_init: begin");
1028 mtx_assert(&adapter->core_mtx, MA_OWNED);
1030 ixgbe_stop(adapter);
1032 /* Get the latest mac address, User can use a LAA */
1033 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1034 IXGBE_ETH_LENGTH_OF_ADDRESS);
1035 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, 1);
1036 adapter->hw.addr_ctrl.rar_used_count = 1;
1038 /* Initialize the hardware */
1039 if (ixgbe_hardware_init(adapter)) {
1040 device_printf(dev, "Unable to initialize the hardware\n");
1044 #ifndef IXGBE_HW_VLAN_SUPPORT
1045 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1048 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
1049 ctrl |= IXGBE_VLNCTRL_VME;
1050 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1051 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1054 /* Prepare transmit descriptors and buffers */
1055 if (ixgbe_setup_transmit_structures(adapter)) {
1056 device_printf(dev,"Could not setup transmit structures\n");
1057 ixgbe_stop(adapter);
1061 ixgbe_initialize_transmit_units(adapter);
1063 /* Setup Multicast table */
1064 ixgbe_set_multi(adapter);
1067 ** Determine the correct mbuf pool
1068 ** for doing jumbo/headersplit
1070 if (ifp->if_mtu > ETHERMTU)
1071 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1073 adapter->rx_mbuf_sz = MCLBYTES;
1075 /* Prepare receive descriptors and buffers */
1076 if (ixgbe_setup_receive_structures(adapter)) {
1077 device_printf(dev,"Could not setup receive structures\n");
1078 ixgbe_stop(adapter);
1082 /* Configure RX settings */
1083 ixgbe_initialize_receive_units(adapter);
1085 /* Configure Interrupt Moderation */
1086 ixgbe_init_moderation(adapter);
1088 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
1090 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1091 gpie |= IXGBE_SDP1_GPIEN;
1092 gpie |= IXGBE_SDP2_GPIEN;
1095 /* Enable Fan Failure Interrupt */
1096 if (hw->device_id == IXGBE_DEV_ID_82598AT)
1097 gpie |= IXGBE_SDP1_GPIEN;
1099 if (adapter->msix > 2) {
1100 /* Enable Enhanced MSIX mode */
1101 gpie |= IXGBE_GPIE_MSIX_MODE;
1102 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1105 IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
1107 /* Set the various hardware offload abilities */
1108 ifp->if_hwassist = 0;
1109 if (ifp->if_capenable & IFCAP_TSO4)
1110 ifp->if_hwassist |= CSUM_TSO;
1111 else if (ifp->if_capenable & IFCAP_TXCSUM)
1112 ifp->if_hwassist = (CSUM_TCP | CSUM_UDP);
1115 if (ifp->if_mtu > ETHERMTU) {
1116 mhadd = IXGBE_READ_REG(&adapter->hw, IXGBE_MHADD);
1117 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1118 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1119 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MHADD, mhadd);
1122 /* Now enable all the queues */
1124 for (int i = 0; i < adapter->num_tx_queues; i++) {
1125 txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
1126 txdctl |= IXGBE_TXDCTL_ENABLE;
1127 /* Set WTHRESH to 8, burst writeback */
1128 txdctl |= (8 << 16);
1129 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
1132 for (int i = 0; i < adapter->num_rx_queues; i++) {
1133 rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
1134 /* PTHRESH set to 32 */
1136 rxdctl |= IXGBE_RXDCTL_ENABLE;
1137 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
1138 for (k = 0; k < 10; k++) {
1139 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1140 IXGBE_RXDCTL_ENABLE)
1146 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1149 /* Enable Receive engine */
1150 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1151 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1152 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1153 rxctrl |= IXGBE_RXCTRL_RXEN;
1154 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
1156 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1158 /* Set up MSI/X routing */
1159 if (ixgbe_enable_msix)
1160 ixgbe_configure_ivars(adapter);
1161 else { /* Simple settings for Legacy/MSI */
1162 ixgbe_set_ivar(adapter, 0, 0, 0);
1163 ixgbe_set_ivar(adapter, 0, 0, 1);
1166 ixgbe_enable_intr(adapter);
1169 ** Check on any SFP devices that
1170 ** need to be kick-started
1172 err = hw->phy.ops.identify(hw);
1173 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1175 "Unsupported SFP+ module type was detected.\n");
1179 if (ixgbe_is_sfp(hw)) {
1180 if (hw->phy.multispeed_fiber) {
1181 hw->mac.ops.setup_sfp(hw);
1182 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1184 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1186 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1189 #ifdef IXGBE_TIMESYNC
1190 /* Initialize IEEE 1588 support */
1191 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1192 ixgbe_tsync_init(adapter);
1195 /* Now inform the stack we're ready */
1196 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1197 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1203 ixgbe_init(void *arg)
1205 struct adapter *adapter = arg;
1207 IXGBE_CORE_LOCK(adapter);
1208 ixgbe_init_locked(adapter);
1209 IXGBE_CORE_UNLOCK(adapter);
1215 ** MSIX Interrupt Tasklets
1219 ixgbe_handle_rx(void *context, int pending)
1221 struct rx_ring *rxr = context;
1222 struct adapter *adapter = rxr->adapter;
1223 u32 loop = MAX_LOOP;
1227 more = ixgbe_rxeof(rxr, -1);
1228 } while (loop-- && more);
1229 /* Reenable this interrupt */
1230 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
1234 ixgbe_handle_tx(void *context, int pending)
1236 struct tx_ring *txr = context;
1237 struct adapter *adapter = txr->adapter;
1238 struct ifnet *ifp = adapter->ifp;
1239 u32 loop = MAX_LOOP;
1244 more = ixgbe_txeof(txr);
1245 } while (loop-- && more);
1247 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1248 ixgbe_start_locked(txr, ifp);
1250 IXGBE_TX_UNLOCK(txr);
1252 /* Reenable this interrupt */
1253 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
1257 /*********************************************************************
1259 * Legacy Interrupt Service routine
1261 **********************************************************************/
1264 ixgbe_legacy_irq(void *arg)
1266 struct adapter *adapter = arg;
1267 struct ixgbe_hw *hw = &adapter->hw;
1268 struct tx_ring *txr = adapter->tx_rings;
1269 struct rx_ring *rxr = adapter->rx_rings;
1271 u32 reg_eicr, loop = MAX_LOOP;
1274 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1276 if (reg_eicr == 0) {
1277 ixgbe_enable_intr(adapter);
1281 if (ixgbe_rxeof(rxr, adapter->rx_process_limit))
1282 taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1287 more = ixgbe_txeof(txr);
1288 } while (loop-- && more);
1289 IXGBE_TX_UNLOCK(txr);
1292 taskqueue_enqueue(txr->tq, &txr->tx_task);
1294 /* Check for fan failure */
1295 if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1296 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1297 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1298 "REPLACE IMMEDIATELY!!\n");
1299 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
1302 /* Link status change */
1303 if (reg_eicr & IXGBE_EICR_LSC) {
1304 ixgbe_check_link(&adapter->hw,
1305 &adapter->link_speed, &adapter->link_up, 0);
1306 ixgbe_update_link_status(adapter);
1309 /* Update interrupt rate */
1310 if (ixgbe_enable_aim == TRUE)
1311 ixgbe_update_aim(rxr);
1313 ixgbe_enable_intr(adapter);
1318 #if __FreeBSD_version >= 602105
1319 /*********************************************************************
1321 * MSI TX Interrupt Service routine
1323 **********************************************************************/
1325 ixgbe_msix_tx(void *arg)
1327 struct tx_ring *txr = arg;
1328 struct adapter *adapter = txr->adapter;
1331 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, txr->eims);
1335 more = ixgbe_txeof(txr);
1336 IXGBE_TX_UNLOCK(txr);
1338 taskqueue_enqueue(txr->tq, &txr->tx_task);
1339 else /* Reenable this interrupt */
1340 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
1345 /*********************************************************************
1347 * MSIX RX Interrupt Service routine
1349 **********************************************************************/
1351 ixgbe_msix_rx(void *arg)
1353 struct rx_ring *rxr = arg;
1354 struct adapter *adapter = rxr->adapter;
1357 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims);
1360 more = ixgbe_rxeof(rxr, adapter->rx_process_limit);
1362 /* Update interrupt rate */
1363 if (ixgbe_enable_aim == TRUE)
1364 ixgbe_update_aim(rxr);
1367 taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1369 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
1375 ixgbe_msix_link(void *arg)
1377 struct adapter *adapter = arg;
1378 struct ixgbe_hw *hw = &adapter->hw;
1381 ++adapter->link_irq;
1383 /* First get the cause */
1384 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, IXGBE_EIMS_OTHER);
1385 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1386 /* Clear with write */
1387 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1389 /* Link status change */
1390 if (reg_eicr & IXGBE_EICR_LSC)
1391 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1393 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1394 if (reg_eicr & IXGBE_EICR_ECC) {
1395 device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1396 "Please Reboot!!\n");
1397 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1399 if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
1400 /* Clear the interrupt */
1401 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1402 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1403 } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
1404 /* Clear the interrupt */
1405 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1406 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1410 /* Check for fan failure */
1411 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1412 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1413 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1414 "REPLACE IMMEDIATELY!!\n");
1415 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1418 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1421 #endif /* FreeBSD_version >= 602105 */
1424 ** Routine to do adjust the RX EITR value based on traffic,
1425 ** its a simple three state model, but seems to help.
1427 ** Note that the three EITR values are tuneable using
1428 ** sysctl in real time. The feature can be effectively
1429 ** nullified by setting them equal.
1431 #define BULK_THRESHOLD 10000
1432 #define AVE_THRESHOLD 1600
1435 ixgbe_update_aim(struct rx_ring *rxr)
1437 struct adapter *adapter = rxr->adapter;
1440 /* Update interrupt moderation based on traffic */
1441 olditr = rxr->eitr_setting;
1444 /* Idle, don't change setting */
1445 if (rxr->bytes == 0)
1448 if (olditr == ixgbe_low_latency) {
1449 if (rxr->bytes > AVE_THRESHOLD)
1450 newitr = ixgbe_ave_latency;
1451 } else if (olditr == ixgbe_ave_latency) {
1452 if (rxr->bytes < AVE_THRESHOLD)
1453 newitr = ixgbe_low_latency;
1454 else if (rxr->bytes > BULK_THRESHOLD)
1455 newitr = ixgbe_bulk_latency;
1456 } else if (olditr == ixgbe_bulk_latency) {
1457 if (rxr->bytes < BULK_THRESHOLD)
1458 newitr = ixgbe_ave_latency;
1461 if (olditr != newitr) {
1462 /* Change interrupt rate */
1463 rxr->eitr_setting = newitr;
1464 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rxr->me),
1465 newitr | (newitr << 16));
1473 ixgbe_init_moderation(struct adapter *adapter)
1475 struct rx_ring *rxr = adapter->rx_rings;
1476 struct tx_ring *txr = adapter->tx_rings;
1478 /* Single interrupt - MSI or Legacy? */
1479 if (adapter->msix < 2) {
1480 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(0), 100);
1484 /* TX irq moderation rate is fixed */
1485 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
1486 IXGBE_WRITE_REG(&adapter->hw,
1487 IXGBE_EITR(txr->msix), ixgbe_ave_latency);
1488 txr->watchdog_timer = FALSE;
1491 /* RX moderation will be adapted over time, set default */
1492 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
1493 IXGBE_WRITE_REG(&adapter->hw,
1494 IXGBE_EITR(rxr->msix), ixgbe_low_latency);
1497 /* Set Link moderation */
1498 IXGBE_WRITE_REG(&adapter->hw,
1499 IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
1503 /*********************************************************************
1505 * Media Ioctl callback
1507 * This routine is called whenever the user queries the status of
1508 * the interface using ifconfig.
1510 **********************************************************************/
1512 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1514 struct adapter *adapter = ifp->if_softc;
1516 INIT_DEBUGOUT("ixgbe_media_status: begin");
1517 IXGBE_CORE_LOCK(adapter);
1518 ixgbe_update_link_status(adapter);
1520 ifmr->ifm_status = IFM_AVALID;
1521 ifmr->ifm_active = IFM_ETHER;
1523 if (!adapter->link_active) {
1524 IXGBE_CORE_UNLOCK(adapter);
1528 ifmr->ifm_status |= IFM_ACTIVE;
1530 switch (adapter->link_speed) {
1531 case IXGBE_LINK_SPEED_1GB_FULL:
1532 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1534 case IXGBE_LINK_SPEED_10GB_FULL:
1535 ifmr->ifm_active |= adapter->optics | IFM_FDX;
1539 IXGBE_CORE_UNLOCK(adapter);
1544 /*********************************************************************
1546 * Media Ioctl callback
1548 * This routine is called when the user changes speed/duplex using
1549 * media/mediopt option with ifconfig.
1551 **********************************************************************/
1553 ixgbe_media_change(struct ifnet * ifp)
1555 struct adapter *adapter = ifp->if_softc;
1556 struct ifmedia *ifm = &adapter->media;
1558 INIT_DEBUGOUT("ixgbe_media_change: begin");
1560 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1563 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1565 adapter->hw.mac.autoneg = TRUE;
1566 adapter->hw.phy.autoneg_advertised =
1567 IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
1570 device_printf(adapter->dev, "Only auto media type\n");
1577 /*********************************************************************
1579 * This routine maps the mbufs to tx descriptors.
1580 * WARNING: while this code is using an MQ style infrastructure,
1581 * it would NOT work as is with more than 1 queue.
1583 * return 0 on success, positive on failure
1584 **********************************************************************/
1587 ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1589 struct adapter *adapter = txr->adapter;
1590 u32 olinfo_status = 0, cmd_type_len;
1592 int i, j, error, nsegs;
1593 int first, last = 0, offload = 0;
1594 struct mbuf *m_head;
1595 bus_dma_segment_t segs[ixgbe_num_segs];
1597 struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
1598 union ixgbe_adv_tx_desc *txd = NULL;
1602 /* Basic descriptor defines */
1603 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1604 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1606 if (m_head->m_flags & M_VLANTAG)
1607 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1610 * Important to capture the first descriptor
1611 * used because it will contain the index of
1612 * the one we tell the hardware to report back
1614 first = txr->next_avail_tx_desc;
1615 txbuf = &txr->tx_buffers[first];
1616 txbuf_mapped = txbuf;
1620 * Map the packet for DMA.
1622 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1623 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1625 if (error == EFBIG) {
1628 m = m_defrag(*m_headp, M_DONTWAIT);
1630 adapter->mbuf_defrag_failed++;
1638 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1639 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1641 if (error == ENOMEM) {
1642 adapter->no_tx_dma_setup++;
1644 } else if (error != 0) {
1645 adapter->no_tx_dma_setup++;
1650 } else if (error == ENOMEM) {
1651 adapter->no_tx_dma_setup++;
1653 } else if (error != 0) {
1654 adapter->no_tx_dma_setup++;
1660 /* Make certain there are enough descriptors */
1661 if (nsegs > txr->tx_avail - 2) {
1662 txr->no_tx_desc_avail++;
1669 ** Set up the appropriate offload context
1670 ** this becomes the first descriptor of
1673 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1674 if (ixgbe_tso_setup(txr, m_head, &paylen)) {
1675 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1676 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1677 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1678 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1682 } else /* Offloads other than TSO */
1683 offload = ixgbe_tx_ctx_setup(txr, m_head);
1684 if (offload == TRUE)
1685 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1686 #ifdef IXGBE_TIMESYNC
1687 if (offload == IXGBE_TIMESTAMP)
1688 cmd_type_len |= IXGBE_ADVTXD_TSTAMP;
1690 /* Record payload length */
1692 olinfo_status |= m_head->m_pkthdr.len <<
1693 IXGBE_ADVTXD_PAYLEN_SHIFT;
1695 i = txr->next_avail_tx_desc;
1696 for (j = 0; j < nsegs; j++) {
1700 txbuf = &txr->tx_buffers[i];
1701 txd = &txr->tx_base[i];
1702 seglen = segs[j].ds_len;
1703 segaddr = htole64(segs[j].ds_addr);
1705 txd->read.buffer_addr = segaddr;
1706 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1707 cmd_type_len |seglen);
1708 txd->read.olinfo_status = htole32(olinfo_status);
1709 last = i; /* Next descriptor that will get completed */
1711 if (++i == adapter->num_tx_desc)
1714 txbuf->m_head = NULL;
1715 txbuf->eop_index = -1;
1718 txd->read.cmd_type_len |=
1719 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1720 txr->tx_avail -= nsegs;
1721 txr->next_avail_tx_desc = i;
1723 txbuf->m_head = m_head;
1725 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1727 /* Set the index of the descriptor that will be marked done */
1728 txbuf = &txr->tx_buffers[first];
1729 txbuf->eop_index = last;
1731 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1732 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1734 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1735 * hardware that this frame is available to transmit.
1737 ++txr->total_packets;
1738 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
1742 bus_dmamap_unload(txr->txtag, txbuf->map);
1748 ixgbe_set_promisc(struct adapter *adapter)
1752 struct ifnet *ifp = adapter->ifp;
1754 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1756 if (ifp->if_flags & IFF_PROMISC) {
1757 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1758 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1759 } else if (ifp->if_flags & IFF_ALLMULTI) {
1760 reg_rctl |= IXGBE_FCTRL_MPE;
1761 reg_rctl &= ~IXGBE_FCTRL_UPE;
1762 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1768 ixgbe_disable_promisc(struct adapter * adapter)
1772 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1774 reg_rctl &= (~IXGBE_FCTRL_UPE);
1775 reg_rctl &= (~IXGBE_FCTRL_MPE);
1776 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1782 /*********************************************************************
1785 * This routine is called whenever multicast address list is updated.
1787 **********************************************************************/
1788 #define IXGBE_RAR_ENTRIES 16
1791 ixgbe_set_multi(struct adapter *adapter)
1794 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1796 struct ifmultiaddr *ifma;
1798 struct ifnet *ifp = adapter->ifp;
1800 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1802 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1803 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1804 if (ifp->if_flags & IFF_PROMISC)
1805 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1806 else if (ifp->if_flags & IFF_ALLMULTI) {
1807 fctrl |= IXGBE_FCTRL_MPE;
1808 fctrl &= ~IXGBE_FCTRL_UPE;
1810 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1812 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1815 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1816 if (ifma->ifma_addr->sa_family != AF_LINK)
1818 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1819 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1820 IXGBE_ETH_LENGTH_OF_ADDRESS);
1823 IF_ADDR_UNLOCK(ifp);
1826 ixgbe_update_mc_addr_list(&adapter->hw,
1827 update_ptr, mcnt, ixgbe_mc_array_itr);
1833 * This is an iterator function now needed by the multicast
1834 * shared code. It simply feeds the shared code routine the
1835 * addresses in the array of ixgbe_set_multi() one by one.
1838 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1840 u8 *addr = *update_ptr;
1844 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1845 *update_ptr = newptr;
1850 /*********************************************************************
1853 * This routine checks for link status,updates statistics,
1854 * and runs the watchdog timer.
1856 **********************************************************************/
1859 ixgbe_local_timer(void *arg)
1861 struct adapter *adapter = arg;
1862 struct ifnet *ifp = adapter->ifp;
1864 mtx_assert(&adapter->core_mtx, MA_OWNED);
1866 /* Check for pluggable optics */
1867 if (adapter->sfp_probe)
1868 if (!ixgbe_sfp_probe(adapter))
1869 goto out; /* Nothing to do */
1871 ixgbe_update_link_status(adapter);
1872 ixgbe_update_stats_counters(adapter);
1873 if (ixgbe_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1874 ixgbe_print_hw_stats(adapter);
1877 * Each tick we check the watchdog
1878 * to protect against hardware hangs.
1880 ixgbe_watchdog(adapter);
1883 /* Trigger an RX interrupt on all queues */
1884 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, adapter->rx_mask);
1886 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1890 ** Note: this routine updates the OS on the link state
1891 ** the real check of the hardware only happens with
1892 ** a link interrupt.
1895 ixgbe_update_link_status(struct adapter *adapter)
1897 struct ifnet *ifp = adapter->ifp;
1898 struct tx_ring *txr = adapter->tx_rings;
1899 device_t dev = adapter->dev;
1902 if (adapter->link_up){
1903 if (adapter->link_active == FALSE) {
1905 device_printf(dev,"Link is up %d Gbps %s \n",
1906 ((adapter->link_speed == 128)? 10:1),
1908 adapter->link_active = TRUE;
1909 if_link_state_change(ifp, LINK_STATE_UP);
1911 } else { /* Link down */
1912 if (adapter->link_active == TRUE) {
1914 device_printf(dev,"Link is Down\n");
1915 if_link_state_change(ifp, LINK_STATE_DOWN);
1916 adapter->link_active = FALSE;
1917 for (int i = 0; i < adapter->num_tx_queues;
1919 txr->watchdog_timer = FALSE;
1927 /*********************************************************************
1929 * This routine disables all traffic on the adapter by issuing a
1930 * global reset on the MAC and deallocates TX/RX buffers.
1932 **********************************************************************/
1935 ixgbe_stop(void *arg)
1938 struct adapter *adapter = arg;
1941 mtx_assert(&adapter->core_mtx, MA_OWNED);
1943 INIT_DEBUGOUT("ixgbe_stop: begin\n");
1944 ixgbe_disable_intr(adapter);
1946 /* Tell the stack that the interface is no longer active */
1947 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1949 #ifdef IXGBE_TIMESYNC
1950 /* Disable IEEE 1588 support */
1951 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1952 ixgbe_tsync_disable(adapter);
1954 ixgbe_reset_hw(&adapter->hw);
1955 adapter->hw.adapter_stopped = FALSE;
1956 ixgbe_stop_adapter(&adapter->hw);
1957 callout_stop(&adapter->timer);
1959 /* reprogram the RAR[0] in case user changed it. */
1960 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1966 /*********************************************************************
1968 * Determine hardware revision.
1970 **********************************************************************/
1972 ixgbe_identify_hardware(struct adapter *adapter)
1974 device_t dev = adapter->dev;
1976 /* Save off the information about this board */
1977 adapter->hw.vendor_id = pci_get_vendor(dev);
1978 adapter->hw.device_id = pci_get_device(dev);
1979 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1980 adapter->hw.subsystem_vendor_id =
1981 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1982 adapter->hw.subsystem_device_id =
1983 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1988 /*********************************************************************
1990 * Setup the Legacy or MSI Interrupt handler
1992 **********************************************************************/
1994 ixgbe_allocate_legacy(struct adapter *adapter)
1996 device_t dev = adapter->dev;
1997 struct tx_ring *txr = adapter->tx_rings;
1998 struct rx_ring *rxr = adapter->rx_rings;
2001 /* Legacy RID at 0 */
2002 if (adapter->msix == 0)
2003 adapter->rid[0] = 0;
2005 /* We allocate a single interrupt resource */
2006 adapter->res[0] = bus_alloc_resource_any(dev,
2007 SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
2008 if (adapter->res[0] == NULL) {
2009 device_printf(dev, "Unable to allocate bus resource: "
2015 * Try allocating a fast interrupt and the associated deferred
2016 * processing contexts.
2018 TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
2019 TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
2020 txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
2021 taskqueue_thread_enqueue, &txr->tq);
2022 rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
2023 taskqueue_thread_enqueue, &rxr->tq);
2024 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2025 device_get_nameunit(adapter->dev));
2026 taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
2027 device_get_nameunit(adapter->dev));
2029 if ((error = bus_setup_intr(dev, adapter->res[0],
2030 #if __FreeBSD_version >= 700000
2031 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2033 INTR_TYPE_NET | INTR_MPSAFE, ixgbe_legacy_irq,
2035 adapter, &adapter->tag[0])) != 0) {
2036 device_printf(dev, "Failed to register fast interrupt "
2037 "handler: %d\n", error);
2038 taskqueue_free(txr->tq);
2039 taskqueue_free(rxr->tq);
2049 #if __FreeBSD_version >= 602105
2050 /*********************************************************************
2052 * Setup MSIX Interrupt resources and handlers
2054 **********************************************************************/
2056 ixgbe_allocate_msix(struct adapter *adapter)
2058 device_t dev = adapter->dev;
2059 struct tx_ring *txr = adapter->tx_rings;
2060 struct rx_ring *rxr = adapter->rx_rings;
2061 int error, vector = 0;
2063 /* TX setup: the code is here for multi tx,
2064 there are other parts of the driver not ready for it */
2065 for (int i = 0; i < adapter->num_tx_queues; i++, vector++, txr++) {
2066 adapter->res[vector] = bus_alloc_resource_any(dev,
2067 SYS_RES_IRQ, &adapter->rid[vector],
2068 RF_SHAREABLE | RF_ACTIVE);
2069 if (!adapter->res[vector]) {
2070 device_printf(dev,"Unable to allocate"
2071 " bus resource: tx interrupt [%d]\n", vector);
2074 /* Set the handler function */
2075 error = bus_setup_intr(dev, adapter->res[vector],
2076 INTR_TYPE_NET | INTR_MPSAFE,
2077 #if __FreeBSD_version > 700000
2080 ixgbe_msix_tx, txr, &adapter->tag[vector]);
2082 adapter->res[vector] = NULL;
2083 device_printf(dev, "Failed to register TX handler");
2087 txr->eims = 1 << vector;
2088 TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
2089 txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
2090 taskqueue_thread_enqueue, &txr->tq);
2091 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2092 device_get_nameunit(adapter->dev));
2096 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rxr++) {
2097 adapter->res[vector] = bus_alloc_resource_any(dev,
2098 SYS_RES_IRQ, &adapter->rid[vector],
2099 RF_SHAREABLE | RF_ACTIVE);
2100 if (!adapter->res[vector]) {
2101 device_printf(dev,"Unable to allocate"
2102 " bus resource: rx interrupt [%d],"
2103 "rid = %d\n", i, adapter->rid[vector]);
2106 /* Set the handler function */
2107 error = bus_setup_intr(dev, adapter->res[vector],
2108 INTR_TYPE_NET | INTR_MPSAFE,
2109 #if __FreeBSD_version > 700000
2112 ixgbe_msix_rx, rxr, &adapter->tag[vector]);
2114 adapter->res[vector] = NULL;
2115 device_printf(dev, "Failed to register RX handler");
2119 rxr->eims = 1 << vector;
2120 /* used in local timer */
2121 adapter->rx_mask |= rxr->eims;
2122 TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
2123 rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
2124 taskqueue_thread_enqueue, &rxr->tq);
2125 taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
2126 device_get_nameunit(adapter->dev));
2129 /* Now for Link changes */
2130 adapter->res[vector] = bus_alloc_resource_any(dev,
2131 SYS_RES_IRQ, &adapter->rid[vector], RF_SHAREABLE | RF_ACTIVE);
2132 if (!adapter->res[vector]) {
2133 device_printf(dev,"Unable to allocate"
2134 " bus resource: Link interrupt [%d]\n", adapter->rid[vector]);
2137 /* Set the link handler function */
2138 error = bus_setup_intr(dev, adapter->res[vector],
2139 INTR_TYPE_NET | INTR_MPSAFE,
2140 #if __FreeBSD_version > 700000
2143 ixgbe_msix_link, adapter, &adapter->tag[vector]);
2145 adapter->res[vector] = NULL;
2146 device_printf(dev, "Failed to register LINK handler");
2149 adapter->linkvec = vector;
2150 /* Tasklets for Link, SFP and Multispeed Fiber */
2151 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2152 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2153 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2154 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2155 taskqueue_thread_enqueue, &adapter->tq);
2156 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2157 device_get_nameunit(adapter->dev));
2161 #else /* Freebsd 6.1/2 */
2163 ixgbe_allocate_msix(struct adapter *adapter)
2169 #if __FreeBSD_version >= 602105
2171 * Setup Either MSI/X or MSI
2174 ixgbe_setup_msix(struct adapter *adapter)
2176 device_t dev = adapter->dev;
2177 int rid, want, queues, msgs;
2179 /* Override by tuneable */
2180 if (ixgbe_enable_msix == 0)
2183 /* First try MSI/X */
2184 rid = PCIR_BAR(MSIX_82598_BAR);
2185 adapter->msix_mem = bus_alloc_resource_any(dev,
2186 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2187 if (!adapter->msix_mem) {
2188 rid += 4; /* 82599 maps in higher BAR */
2189 adapter->msix_mem = bus_alloc_resource_any(dev,
2190 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2192 if (!adapter->msix_mem) {
2193 /* May not be enabled */
2194 device_printf(adapter->dev,
2195 "Unable to map MSIX table \n");
2199 msgs = pci_msix_count(dev);
2200 if (msgs == 0) { /* system has msix disabled */
2201 bus_release_resource(dev, SYS_RES_MEMORY,
2202 rid, adapter->msix_mem);
2203 adapter->msix_mem = NULL;
2207 /* Figure out a reasonable auto config value */
2208 queues = (mp_ncpus > ((msgs-1)/2)) ? (msgs-1)/2 : mp_ncpus;
2210 if (ixgbe_tx_queues == 0)
2211 ixgbe_tx_queues = queues;
2212 if (ixgbe_rx_queues == 0)
2213 ixgbe_rx_queues = queues;
2214 want = ixgbe_tx_queues + ixgbe_rx_queues + 1;
2218 device_printf(adapter->dev,
2219 "MSIX Configuration Problem, "
2220 "%d vectors but %d queues wanted!\n",
2224 if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
2225 device_printf(adapter->dev,
2226 "Using MSIX interrupts with %d vectors\n", msgs);
2227 adapter->num_tx_queues = ixgbe_tx_queues;
2228 adapter->num_rx_queues = ixgbe_rx_queues;
2232 msgs = pci_msi_count(dev);
2233 if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
2234 device_printf(adapter->dev,"Using MSI interrupt\n");
2237 #endif /* FreeBSD_version >= 602105 */
2240 ixgbe_allocate_pci_resources(struct adapter *adapter)
2243 device_t dev = adapter->dev;
2246 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2249 if (!(adapter->pci_mem)) {
2250 device_printf(dev,"Unable to allocate bus resource: memory\n");
2254 adapter->osdep.mem_bus_space_tag =
2255 rman_get_bustag(adapter->pci_mem);
2256 adapter->osdep.mem_bus_space_handle =
2257 rman_get_bushandle(adapter->pci_mem);
2258 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2261 * Init the resource arrays
2263 for (int i = 0; i < IXGBE_MSGS; i++) {
2264 adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
2265 adapter->tag[i] = NULL;
2266 adapter->res[i] = NULL;
2269 /* Legacy defaults */
2270 adapter->num_tx_queues = 1;
2271 adapter->num_rx_queues = 1;
2273 #if __FreeBSD_version >= 602105
2274 /* Now setup MSI or MSI/X */
2275 adapter->msix = ixgbe_setup_msix(adapter);
2277 adapter->hw.back = &adapter->osdep;
2282 ixgbe_free_pci_resources(struct adapter * adapter)
2284 device_t dev = adapter->dev;
2288 * Legacy has this set to 0, but we need
2289 * to run this once, so reset it.
2291 if (adapter->msix == 0)
2294 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2295 rid = PCIR_BAR(MSIX_82598_BAR);
2297 rid = PCIR_BAR(MSIX_82599_BAR);
2300 * First release all the interrupt resources:
2301 * notice that since these are just kept
2302 * in an array we can do the same logic
2303 * whether its MSIX or just legacy.
2305 for (int i = 0; i < adapter->msix; i++) {
2306 if (adapter->tag[i] != NULL) {
2307 bus_teardown_intr(dev, adapter->res[i],
2309 adapter->tag[i] = NULL;
2311 if (adapter->res[i] != NULL) {
2312 bus_release_resource(dev, SYS_RES_IRQ,
2313 adapter->rid[i], adapter->res[i]);
2317 #if __FreeBSD_version >= 602105
2319 pci_release_msi(dev);
2321 if (adapter->msix_mem != NULL)
2322 bus_release_resource(dev, SYS_RES_MEMORY,
2323 rid, adapter->msix_mem);
2326 if (adapter->pci_mem != NULL)
2327 bus_release_resource(dev, SYS_RES_MEMORY,
2328 PCIR_BAR(0), adapter->pci_mem);
2333 /*********************************************************************
2335 * Initialize the hardware to a configuration as specified by the
2336 * adapter structure. The controller is reset, the EEPROM is
2337 * verified, the MAC address is set, then the shared initialization
2338 * routines are called.
2340 **********************************************************************/
2342 ixgbe_hardware_init(struct adapter *adapter)
2344 device_t dev = adapter->dev;
2348 /* Issue a global reset */
2349 adapter->hw.adapter_stopped = FALSE;
2350 ixgbe_stop_adapter(&adapter->hw);
2352 /* Make sure we have a good EEPROM before we read from it */
2353 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
2354 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
2358 /* Get Hardware Flow Control setting */
2359 adapter->hw.fc.requested_mode = ixgbe_fc_full;
2360 adapter->hw.fc.pause_time = IXGBE_FC_PAUSE;
2361 adapter->hw.fc.low_water = IXGBE_FC_LO;
2362 adapter->hw.fc.high_water = IXGBE_FC_HI;
2363 adapter->hw.fc.send_xon = TRUE;
2365 if (ixgbe_init_hw(&adapter->hw)) {
2366 device_printf(dev,"Hardware Initialization Failed");
2373 /*********************************************************************
2375 * Setup networking device structure and register an interface.
2377 **********************************************************************/
2379 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2382 struct ixgbe_hw *hw = &adapter->hw;
2383 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2385 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2387 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
2388 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2389 ifp->if_mtu = ETHERMTU;
2390 ifp->if_baudrate = 1000000000;
2391 ifp->if_init = ixgbe_init;
2392 ifp->if_softc = adapter;
2393 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2394 ifp->if_ioctl = ixgbe_ioctl;
2395 ifp->if_start = ixgbe_start;
2397 ifp->if_watchdog = NULL;
2398 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
2400 ether_ifattach(ifp, adapter->hw.mac.addr);
2402 adapter->max_frame_size =
2403 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2406 * Tell the upper layer(s) we support long frames.
2408 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2410 ifp->if_capabilities |= (IFCAP_HWCSUM | IFCAP_TSO4);
2411 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2412 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2414 ifp->if_capenable = ifp->if_capabilities;
2416 if (hw->device_id == IXGBE_DEV_ID_82598AT)
2417 ixgbe_setup_link_speed(hw, (IXGBE_LINK_SPEED_10GB_FULL |
2418 IXGBE_LINK_SPEED_1GB_FULL), TRUE, TRUE);
2420 ixgbe_setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL,
2424 * Specify the media types supported by this adapter and register
2425 * callbacks to update media and link information
2427 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2428 ixgbe_media_status);
2429 ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics |
2431 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2432 ifmedia_add(&adapter->media,
2433 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2434 ifmedia_add(&adapter->media,
2435 IFM_ETHER | IFM_1000_T, 0, NULL);
2437 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2438 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2443 /********************************************************************
2444 * Manage DMA'able memory.
2445 *******************************************************************/
2447 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
2451 *(bus_addr_t *) arg = segs->ds_addr;
2456 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2457 struct ixgbe_dma_alloc *dma, int mapflags)
2459 device_t dev = adapter->dev;
2462 r = bus_dma_tag_create(NULL, /* parent */
2463 1, 0, /* alignment, bounds */
2464 BUS_SPACE_MAXADDR, /* lowaddr */
2465 BUS_SPACE_MAXADDR, /* highaddr */
2466 NULL, NULL, /* filter, filterarg */
2469 size, /* maxsegsize */
2470 BUS_DMA_ALLOCNOW, /* flags */
2471 NULL, /* lockfunc */
2472 NULL, /* lockfuncarg */
2475 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2479 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2480 BUS_DMA_NOWAIT, &dma->dma_map);
2482 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2486 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2490 mapflags | BUS_DMA_NOWAIT);
2492 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2496 dma->dma_size = size;
2499 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2501 bus_dma_tag_destroy(dma->dma_tag);
2503 dma->dma_map = NULL;
2504 dma->dma_tag = NULL;
2509 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2511 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2512 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2513 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2514 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2515 bus_dma_tag_destroy(dma->dma_tag);
2519 /*********************************************************************
2521 * Allocate memory for the transmit and receive rings, and then
2522 * the descriptors associated with each, called only once at attach.
2524 **********************************************************************/
2526 ixgbe_allocate_queues(struct adapter *adapter)
2528 device_t dev = adapter->dev;
2529 struct tx_ring *txr;
2530 struct rx_ring *rxr;
2531 int rsize, tsize, error = IXGBE_SUCCESS;
2532 int txconf = 0, rxconf = 0;
2534 /* First allocate the TX ring struct memory */
2535 if (!(adapter->tx_rings =
2536 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2537 adapter->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2538 device_printf(dev, "Unable to allocate TX ring memory\n");
2542 txr = adapter->tx_rings;
2544 /* Next allocate the RX */
2545 if (!(adapter->rx_rings =
2546 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2547 adapter->num_rx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2548 device_printf(dev, "Unable to allocate RX ring memory\n");
2552 rxr = adapter->rx_rings;
2554 /* For the ring itself */
2555 tsize = roundup2(adapter->num_tx_desc *
2556 sizeof(union ixgbe_adv_tx_desc), 4096);
2559 * Now set up the TX queues, txconf is needed to handle the
2560 * possibility that things fail midcourse and we need to
2561 * undo memory gracefully
2563 for (int i = 0; i < adapter->num_tx_queues; i++, txconf++) {
2564 /* Set up some basics */
2565 txr = &adapter->tx_rings[i];
2566 txr->adapter = adapter;
2569 /* Initialize the TX side lock */
2570 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2571 device_get_nameunit(dev), txr->me);
2572 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2574 if (ixgbe_dma_malloc(adapter, tsize,
2575 &txr->txdma, BUS_DMA_NOWAIT)) {
2577 "Unable to allocate TX Descriptor memory\n");
2581 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2582 bzero((void *)txr->tx_base, tsize);
2584 /* Now allocate transmit buffers for the ring */
2585 if (ixgbe_allocate_transmit_buffers(txr)) {
2587 "Critical Failure setting up transmit buffers\n");
2595 * Next the RX queues...
2597 rsize = roundup2(adapter->num_rx_desc *
2598 sizeof(union ixgbe_adv_rx_desc), 4096);
2599 for (int i = 0; i < adapter->num_rx_queues; i++, rxconf++) {
2600 rxr = &adapter->rx_rings[i];
2601 /* Set up some basics */
2602 rxr->adapter = adapter;
2605 /* Initialize the RX side lock */
2606 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2607 device_get_nameunit(dev), rxr->me);
2608 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2610 if (ixgbe_dma_malloc(adapter, rsize,
2611 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2613 "Unable to allocate RxDescriptor memory\n");
2617 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2618 bzero((void *)rxr->rx_base, rsize);
2620 /* Allocate receive buffers for the ring*/
2621 if (ixgbe_allocate_receive_buffers(rxr)) {
2623 "Critical Failure setting up receive buffers\n");
2632 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2633 ixgbe_dma_free(adapter, &rxr->rxdma);
2635 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2636 ixgbe_dma_free(adapter, &txr->txdma);
2637 free(adapter->rx_rings, M_DEVBUF);
2639 free(adapter->tx_rings, M_DEVBUF);
2644 /*********************************************************************
2646 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2647 * the information needed to transmit a packet on the wire. This is
2648 * called only once at attach, setup is done every reset.
2650 **********************************************************************/
2652 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2654 struct adapter *adapter = txr->adapter;
2655 device_t dev = adapter->dev;
2656 struct ixgbe_tx_buf *txbuf;
2660 * Setup DMA descriptor areas.
2662 if ((error = bus_dma_tag_create(NULL, /* parent */
2663 1, 0, /* alignment, bounds */
2664 BUS_SPACE_MAXADDR, /* lowaddr */
2665 BUS_SPACE_MAXADDR, /* highaddr */
2666 NULL, NULL, /* filter, filterarg */
2667 IXGBE_TSO_SIZE, /* maxsize */
2668 ixgbe_num_segs, /* nsegments */
2669 PAGE_SIZE, /* maxsegsize */
2671 NULL, /* lockfunc */
2672 NULL, /* lockfuncarg */
2674 device_printf(dev,"Unable to allocate TX DMA tag\n");
2678 if (!(txr->tx_buffers =
2679 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
2680 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2681 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2686 /* Create the descriptor buffer dma maps */
2687 txbuf = txr->tx_buffers;
2688 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2689 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2691 device_printf(dev, "Unable to create TX DMA map\n");
2698 /* We free all, it handles case where we are in the middle */
2699 ixgbe_free_transmit_structures(adapter);
2703 /*********************************************************************
2705 * Initialize a transmit ring.
2707 **********************************************************************/
2709 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2711 struct adapter *adapter = txr->adapter;
2712 struct ixgbe_tx_buf *txbuf;
2715 /* Clear the old ring contents */
2716 bzero((void *)txr->tx_base,
2717 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
2719 txr->next_avail_tx_desc = 0;
2720 txr->next_tx_to_clean = 0;
2722 /* Free any existing tx buffers. */
2723 txbuf = txr->tx_buffers;
2724 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2725 if (txbuf->m_head != NULL) {
2726 bus_dmamap_sync(txr->txtag, txbuf->map,
2727 BUS_DMASYNC_POSTWRITE);
2728 bus_dmamap_unload(txr->txtag, txbuf->map);
2729 m_freem(txbuf->m_head);
2730 txbuf->m_head = NULL;
2732 /* Clear the EOP index */
2733 txbuf->eop_index = -1;
2736 /* Set number of descriptors available */
2737 txr->tx_avail = adapter->num_tx_desc;
2739 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2740 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2743 /*********************************************************************
2745 * Initialize all transmit rings.
2747 **********************************************************************/
2749 ixgbe_setup_transmit_structures(struct adapter *adapter)
2751 struct tx_ring *txr = adapter->tx_rings;
2753 for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
2754 ixgbe_setup_transmit_ring(txr);
2759 /*********************************************************************
2761 * Enable transmit unit.
2763 **********************************************************************/
2765 ixgbe_initialize_transmit_units(struct adapter *adapter)
2767 struct tx_ring *txr = adapter->tx_rings;
2768 struct ixgbe_hw *hw = &adapter->hw;
2770 /* Setup the Base and Length of the Tx Descriptor Ring */
2772 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
2773 u64 tdba = txr->txdma.dma_paddr;
2775 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2776 (tdba & 0x00000000ffffffffULL));
2777 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2778 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2779 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2781 /* Setup the HW Tx Head and Tail descriptor pointers */
2782 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2783 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2785 /* Setup Transmit Descriptor Cmd Settings */
2786 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2788 txr->watchdog_timer = 0;
2791 if (hw->mac.type == ixgbe_mac_82599EB) {
2793 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2794 dmatxctl |= IXGBE_DMATXCTL_TE;
2795 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2801 /*********************************************************************
2803 * Free all transmit rings.
2805 **********************************************************************/
2807 ixgbe_free_transmit_structures(struct adapter *adapter)
2809 struct tx_ring *txr = adapter->tx_rings;
2811 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
2813 ixgbe_free_transmit_buffers(txr);
2814 ixgbe_dma_free(adapter, &txr->txdma);
2815 IXGBE_TX_UNLOCK(txr);
2816 IXGBE_TX_LOCK_DESTROY(txr);
2818 free(adapter->tx_rings, M_DEVBUF);
2821 /*********************************************************************
2823 * Free transmit ring related data structures.
2825 **********************************************************************/
2827 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2829 struct adapter *adapter = txr->adapter;
2830 struct ixgbe_tx_buf *tx_buffer;
2833 INIT_DEBUGOUT("free_transmit_ring: begin");
2835 if (txr->tx_buffers == NULL)
2838 tx_buffer = txr->tx_buffers;
2839 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2840 if (tx_buffer->m_head != NULL) {
2841 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2842 BUS_DMASYNC_POSTWRITE);
2843 bus_dmamap_unload(txr->txtag,
2845 m_freem(tx_buffer->m_head);
2846 tx_buffer->m_head = NULL;
2847 if (tx_buffer->map != NULL) {
2848 bus_dmamap_destroy(txr->txtag,
2850 tx_buffer->map = NULL;
2852 } else if (tx_buffer->map != NULL) {
2853 bus_dmamap_unload(txr->txtag,
2855 bus_dmamap_destroy(txr->txtag,
2857 tx_buffer->map = NULL;
2861 if (txr->tx_buffers != NULL) {
2862 free(txr->tx_buffers, M_DEVBUF);
2863 txr->tx_buffers = NULL;
2865 if (txr->txtag != NULL) {
2866 bus_dma_tag_destroy(txr->txtag);
2872 /*********************************************************************
2874 * Advanced Context Descriptor setup for VLAN or CSUM
2876 **********************************************************************/
2879 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2881 struct adapter *adapter = txr->adapter;
2882 struct ixgbe_adv_tx_context_desc *TXD;
2883 struct ixgbe_tx_buf *tx_buffer;
2884 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2885 struct ether_vlan_header *eh;
2886 struct ip *ip = NULL;
2887 struct ip6_hdr *ip6;
2888 int ehdrlen, ip_hlen = 0;
2891 bool offload = FALSE;
2892 int ctxd = txr->next_avail_tx_desc;
2893 #if __FreeBSD_version < 700000
2900 if (mp->m_pkthdr.csum_flags & CSUM_OFFLOAD)
2903 tx_buffer = &txr->tx_buffers[ctxd];
2904 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2907 ** In advanced descriptors the vlan tag must
2908 ** be placed into the descriptor itself.
2910 #if __FreeBSD_version < 700000
2911 mtag = VLAN_OUTPUT_TAG(ifp, mp);
2914 htole16(VLAN_TAG_VALUE(mtag)) << IXGBE_ADVTXD_VLAN_SHIFT;
2918 if (mp->m_flags & M_VLANTAG) {
2919 vtag = htole16(mp->m_pkthdr.ether_vtag);
2920 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2925 * Determine where frame payload starts.
2926 * Jump over vlan headers if already present,
2927 * helpful for QinQ too.
2929 eh = mtod(mp, struct ether_vlan_header *);
2930 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2931 etype = ntohs(eh->evl_proto);
2932 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2934 etype = ntohs(eh->evl_encap_proto);
2935 ehdrlen = ETHER_HDR_LEN;
2938 /* Set the ether header length */
2939 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2943 ip = (struct ip *)(mp->m_data + ehdrlen);
2944 ip_hlen = ip->ip_hl << 2;
2945 if (mp->m_len < ehdrlen + ip_hlen)
2946 return FALSE; /* failure */
2948 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2950 case ETHERTYPE_IPV6:
2951 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2952 ip_hlen = sizeof(struct ip6_hdr);
2953 if (mp->m_len < ehdrlen + ip_hlen)
2954 return FALSE; /* failure */
2955 ipproto = ip6->ip6_nxt;
2956 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2958 #ifdef IXGBE_TIMESYNC
2959 case ETHERTYPE_IEEE1588:
2960 return (IXGBE_TIMESTAMP);
2966 vlan_macip_lens |= ip_hlen;
2967 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2971 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2972 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2979 #ifdef IXGBE_TIMESYNC
2980 void *hdr = (caddr_t) ip + ip_hlen;
2981 struct udphdr *uh = (struct udphdr *)hdr;
2983 if (uh->uh_dport == htons(TSYNC_UDP_PORT))
2984 return (IXGBE_TIMESTAMP);
2986 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2987 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2998 if (offload != TRUE )
3001 /* Now copy bits into descriptor */
3002 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3003 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3004 TXD->seqnum_seed = htole32(0);
3005 TXD->mss_l4len_idx = htole32(0);
3007 tx_buffer->m_head = NULL;
3008 tx_buffer->eop_index = -1;
3010 /* We've consumed the first desc, adjust counters */
3011 if (++ctxd == adapter->num_tx_desc)
3013 txr->next_avail_tx_desc = ctxd;
3019 #if __FreeBSD_version >= 700000
3020 /**********************************************************************
3022 * Setup work for hardware segmentation offload (TSO) on
3023 * adapters using advanced tx descriptors
3025 **********************************************************************/
3027 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
3029 struct adapter *adapter = txr->adapter;
3030 struct ixgbe_adv_tx_context_desc *TXD;
3031 struct ixgbe_tx_buf *tx_buffer;
3032 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3033 u32 mss_l4len_idx = 0;
3035 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
3036 struct ether_vlan_header *eh;
3042 * Determine where frame payload starts.
3043 * Jump over vlan headers if already present
3045 eh = mtod(mp, struct ether_vlan_header *);
3046 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
3047 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3049 ehdrlen = ETHER_HDR_LEN;
3051 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3052 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3055 ctxd = txr->next_avail_tx_desc;
3056 tx_buffer = &txr->tx_buffers[ctxd];
3057 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3059 ip = (struct ip *)(mp->m_data + ehdrlen);
3060 if (ip->ip_p != IPPROTO_TCP)
3061 return FALSE; /* 0 */
3063 ip_hlen = ip->ip_hl << 2;
3064 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
3065 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3066 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3067 tcp_hlen = th->th_off << 2;
3068 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
3070 /* This is used in the transmit desc in encap */
3071 *paylen = mp->m_pkthdr.len - hdrlen;
3073 /* VLAN MACLEN IPLEN */
3074 if (mp->m_flags & M_VLANTAG) {
3075 vtag = htole16(mp->m_pkthdr.ether_vtag);
3076 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3079 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3080 vlan_macip_lens |= ip_hlen;
3081 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3083 /* ADV DTYPE TUCMD */
3084 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3085 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3086 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3087 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3091 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
3092 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
3093 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3095 TXD->seqnum_seed = htole32(0);
3096 tx_buffer->m_head = NULL;
3097 tx_buffer->eop_index = -1;
3099 if (++ctxd == adapter->num_tx_desc)
3103 txr->next_avail_tx_desc = ctxd;
3107 #else /* For 6.2 RELEASE */
3108 /* This makes it easy to keep the code common */
3110 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
3116 /**********************************************************************
3118 * Examine each tx_buffer in the used queue. If the hardware is done
3119 * processing the packet then free associated resources. The
3120 * tx_buffer is put back on the free queue.
3122 **********************************************************************/
3124 ixgbe_txeof(struct tx_ring *txr)
3126 struct adapter * adapter = txr->adapter;
3127 struct ifnet *ifp = adapter->ifp;
3128 u32 first, last, done, num_avail;
3130 struct ixgbe_tx_buf *tx_buffer;
3131 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
3133 mtx_assert(&txr->tx_mtx, MA_OWNED);
3135 if (txr->tx_avail == adapter->num_tx_desc)
3138 num_avail = txr->tx_avail;
3139 first = txr->next_tx_to_clean;
3141 tx_buffer = &txr->tx_buffers[first];
3142 /* For cleanup we just use legacy struct */
3143 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3144 last = tx_buffer->eop_index;
3148 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3150 ** Get the index of the first descriptor
3151 ** BEYOND the EOP and call that 'done'.
3152 ** I do this so the comparison in the
3153 ** inner while loop below can be simple
3155 if (++last == adapter->num_tx_desc) last = 0;
3158 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3159 BUS_DMASYNC_POSTREAD);
3161 ** Only the EOP descriptor of a packet now has the DD
3162 ** bit set, this is what we look for...
3164 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
3165 /* We clean the range of the packet */
3166 while (first != done) {
3167 tx_desc->upper.data = 0;
3168 tx_desc->lower.data = 0;
3169 tx_desc->buffer_addr = 0;
3170 num_avail++; cleaned++;
3172 if (tx_buffer->m_head) {
3174 bus_dmamap_sync(txr->txtag,
3176 BUS_DMASYNC_POSTWRITE);
3177 bus_dmamap_unload(txr->txtag,
3179 m_freem(tx_buffer->m_head);
3180 tx_buffer->m_head = NULL;
3181 tx_buffer->map = NULL;
3183 tx_buffer->eop_index = -1;
3185 if (++first == adapter->num_tx_desc)
3188 tx_buffer = &txr->tx_buffers[first];
3190 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3192 /* See if there is more work now */
3193 last = tx_buffer->eop_index;
3196 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3197 /* Get next done point */
3198 if (++last == adapter->num_tx_desc) last = 0;
3203 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3204 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3206 txr->next_tx_to_clean = first;
3209 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
3210 * it is OK to send packets. If there are no pending descriptors,
3211 * clear the timeout. Otherwise, if some descriptors have been freed,
3212 * restart the timeout.
3214 if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
3215 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3216 /* If all are clean turn off the timer */
3217 if (num_avail == adapter->num_tx_desc) {
3218 txr->watchdog_timer = 0;
3219 txr->tx_avail = num_avail;
3224 /* Some were cleaned, so reset timer */
3226 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
3227 txr->tx_avail = num_avail;
3231 /*********************************************************************
3233 * Get a buffer from system mbuf buffer pool.
3235 **********************************************************************/
3237 ixgbe_get_buf(struct rx_ring *rxr, int i, u8 clean)
3239 struct adapter *adapter = rxr->adapter;
3240 bus_dma_segment_t seg[2];
3241 struct ixgbe_rx_buf *rxbuf;
3242 struct mbuf *mh, *mp;
3248 rxbuf = &rxr->rx_buffers[i];
3250 /* First get our header and payload mbuf */
3251 if (clean & IXGBE_CLEAN_HDR) {
3252 mh = m_gethdr(M_DONTWAIT, MT_DATA);
3256 mh = rxr->rx_buffers[i].m_head;
3259 mh->m_flags |= M_PKTHDR;
3261 if (clean & IXGBE_CLEAN_PKT) {
3262 mp = m_getjcl(M_DONTWAIT, MT_DATA,
3263 M_PKTHDR, adapter->rx_mbuf_sz);
3266 mp->m_len = adapter->rx_mbuf_sz;
3267 mp->m_flags &= ~M_PKTHDR;
3268 } else { /* reusing */
3269 mp = rxr->rx_buffers[i].m_pack;
3270 mp->m_len = adapter->rx_mbuf_sz;
3271 mp->m_flags &= ~M_PKTHDR;
3274 ** Need to create a chain for the following
3275 ** dmamap call at this point.
3278 mh->m_pkthdr.len = mh->m_len + mp->m_len;
3280 /* Get the memory mapping */
3281 error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3282 rxr->spare_map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3284 printf("GET BUF: dmamap load failure - %d\n", error);
3289 /* Unload old mapping and update buffer struct */
3290 if (rxbuf->m_head != NULL)
3291 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3293 rxbuf->map = rxr->spare_map;
3294 rxr->spare_map = map;
3297 bus_dmamap_sync(rxr->rxtag,
3298 rxbuf->map, BUS_DMASYNC_PREREAD);
3300 /* Update descriptor */
3301 rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3302 rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
3307 ** If we get here, we have an mbuf resource
3308 ** issue, so we discard the incoming packet
3309 ** and attempt to reuse existing mbufs next
3310 ** pass thru the ring, but to do so we must
3311 ** fix up the descriptor which had the address
3312 ** clobbered with writeback info.
3315 adapter->mbuf_header_failed++;
3317 /* Is there a reusable buffer? */
3318 mh = rxr->rx_buffers[i].m_head;
3319 if (mh == NULL) /* Nope, init error */
3321 mp = rxr->rx_buffers[i].m_pack;
3322 if (mp == NULL) /* Nope, init error */
3324 /* Get our old mapping */
3325 rxbuf = &rxr->rx_buffers[i];
3326 error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3327 rxbuf->map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3329 /* We really have a problem */
3333 /* Now fix the descriptor as needed */
3334 rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3335 rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
3341 /*********************************************************************
3343 * Allocate memory for rx_buffer structures. Since we use one
3344 * rx_buffer per received packet, the maximum number of rx_buffer's
3345 * that we'll need is equal to the number of receive descriptors
3346 * that we've allocated.
3348 **********************************************************************/
3350 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
3352 struct adapter *adapter = rxr->adapter;
3353 device_t dev = adapter->dev;
3354 struct ixgbe_rx_buf *rxbuf;
3355 int i, bsize, error;
3357 bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
3358 if (!(rxr->rx_buffers =
3359 (struct ixgbe_rx_buf *) malloc(bsize,
3360 M_DEVBUF, M_NOWAIT | M_ZERO))) {
3361 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3367 ** The tag is made to accomodate the largest buffer size
3368 ** with packet split (hence the two segments, even though
3369 ** it may not always use this.
3371 if ((error = bus_dma_tag_create(NULL, /* parent */
3372 1, 0, /* alignment, bounds */
3373 BUS_SPACE_MAXADDR, /* lowaddr */
3374 BUS_SPACE_MAXADDR, /* highaddr */
3375 NULL, NULL, /* filter, filterarg */
3376 MJUM16BYTES, /* maxsize */
3378 MJUMPAGESIZE, /* maxsegsize */
3380 NULL, /* lockfunc */
3381 NULL, /* lockfuncarg */
3383 device_printf(dev, "Unable to create RX DMA tag\n");
3387 /* Create the spare map (used by getbuf) */
3388 error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
3391 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3396 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
3397 rxbuf = &rxr->rx_buffers[i];
3398 error = bus_dmamap_create(rxr->rxtag,
3399 BUS_DMA_NOWAIT, &rxbuf->map);
3401 device_printf(dev, "Unable to create RX DMA map\n");
3409 /* Frees all, but can handle partial completion */
3410 ixgbe_free_receive_structures(adapter);
3414 /*********************************************************************
3416 * Initialize a receive ring and its buffers.
3418 **********************************************************************/
3420 ixgbe_setup_receive_ring(struct rx_ring *rxr)
3422 struct adapter *adapter;
3424 struct ixgbe_rx_buf *rxbuf;
3425 struct lro_ctrl *lro = &rxr->lro;
3428 adapter = rxr->adapter;
3431 /* Clear the ring contents */
3432 rsize = roundup2(adapter->num_rx_desc *
3433 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
3434 bzero((void *)rxr->rx_base, rsize);
3437 ** Free current RX buffer structs and their mbufs
3439 for (int i = 0; i < adapter->num_rx_desc; i++) {
3440 rxbuf = &rxr->rx_buffers[i];
3441 if (rxbuf->m_head != NULL) {
3442 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3443 BUS_DMASYNC_POSTREAD);
3444 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3445 if (rxbuf->m_head) {
3446 rxbuf->m_head->m_next = rxbuf->m_pack;
3447 m_freem(rxbuf->m_head);
3449 rxbuf->m_head = NULL;
3450 rxbuf->m_pack = NULL;
3454 /* Now refresh the mbufs */
3455 for (j = 0; j < adapter->num_rx_desc; j++) {
3456 if (ixgbe_get_buf(rxr, j, IXGBE_CLEAN_ALL) == ENOBUFS) {
3457 rxr->rx_buffers[j].m_head = NULL;
3458 rxr->rx_buffers[j].m_pack = NULL;
3459 rxr->rx_base[j].read.hdr_addr = 0;
3460 rxr->rx_base[j].read.pkt_addr = 0;
3465 /* Setup our descriptor indices */
3466 rxr->next_to_check = 0;
3467 rxr->last_cleaned = 0;
3469 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3470 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3472 /* Now set up the LRO interface */
3473 if (ixgbe_enable_lro) {
3474 int err = tcp_lro_init(lro);
3476 INIT_DEBUGOUT("LRO Initialization failed!\n");
3479 INIT_DEBUGOUT("RX LRO Initialized\n");
3480 lro->ifp = adapter->ifp;
3487 * We need to clean up any buffers allocated
3488 * so far, 'j' is the failing index.
3490 for (int i = 0; i < j; i++) {
3491 rxbuf = &rxr->rx_buffers[i];
3492 if (rxbuf->m_head != NULL) {
3493 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3494 BUS_DMASYNC_POSTREAD);
3495 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3496 m_freem(rxbuf->m_head);
3497 rxbuf->m_head = NULL;
3503 /*********************************************************************
3505 * Initialize all receive rings.
3507 **********************************************************************/
3509 ixgbe_setup_receive_structures(struct adapter *adapter)
3511 struct rx_ring *rxr = adapter->rx_rings;
3514 for (j = 0; j < adapter->num_rx_queues; j++, rxr++)
3515 if (ixgbe_setup_receive_ring(rxr))
3521 * Free RX buffers allocated so far, we will only handle
3522 * the rings that completed, the failing case will have
3523 * cleaned up for itself. 'j' failed, so its the terminus.
3525 for (int i = 0; i < j; ++i) {
3526 rxr = &adapter->rx_rings[i];
3527 for (int n = 0; n < adapter->num_rx_desc; n++) {
3528 struct ixgbe_rx_buf *rxbuf;
3529 rxbuf = &rxr->rx_buffers[n];
3530 if (rxbuf->m_head != NULL) {
3531 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3532 BUS_DMASYNC_POSTREAD);
3533 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3534 m_freem(rxbuf->m_head);
3535 rxbuf->m_head = NULL;
3543 /*********************************************************************
3545 * Setup receive registers and features.
3547 **********************************************************************/
3548 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3551 ixgbe_initialize_receive_units(struct adapter *adapter)
3553 struct rx_ring *rxr = adapter->rx_rings;
3554 struct ixgbe_hw *hw = &adapter->hw;
3555 struct ifnet *ifp = adapter->ifp;
3556 u32 rxctrl, fctrl, srrctl, rxcsum;
3557 u32 reta, mrqc = 0, hlreg, random[10];
3561 * Make sure receives are disabled while
3562 * setting up the descriptor ring
3564 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3565 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
3566 rxctrl & ~IXGBE_RXCTRL_RXEN);
3568 /* Enable broadcasts */
3569 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3570 fctrl |= IXGBE_FCTRL_BAM;
3571 fctrl |= IXGBE_FCTRL_DPF;
3572 fctrl |= IXGBE_FCTRL_PMCF;
3573 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3575 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(0));
3576 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3577 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3579 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3580 /* Set for Jumbo Frames? */
3581 if (ifp->if_mtu > ETHERMTU) {
3582 hlreg |= IXGBE_HLREG0_JUMBOEN;
3583 srrctl |= 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3585 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3586 srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3588 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3590 if (ixgbe_rx_hdr_split) {
3591 /* Use a standard mbuf for the header */
3592 srrctl |= ((IXGBE_RX_HDR << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3593 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3594 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3595 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
3596 /* PSRTYPE must be initialized in 82599 */
3597 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3598 IXGBE_PSRTYPE_UDPHDR |
3599 IXGBE_PSRTYPE_IPV4HDR |
3600 IXGBE_PSRTYPE_IPV6HDR;
3601 psrtype |= (7 << 29);
3602 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3605 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3607 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
3608 srrctl |= IXGBE_SRRCTL_DROP_EN;
3610 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(0), srrctl);
3612 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3613 u64 rdba = rxr->rxdma.dma_paddr;
3614 /* Setup the Base and Length of the Rx Descriptor Ring */
3615 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
3616 (rdba & 0x00000000ffffffffULL));
3617 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
3618 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
3619 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3621 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3622 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
3623 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
3626 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3629 if (adapter->num_rx_queues > 1) {
3633 /* set up random bits */
3634 arc4rand(&random, sizeof(random), 0);
3636 /* Set up the redirection table */
3637 for (i = 0, j = 0; i < 128; i++, j++) {
3638 if (j == adapter->num_rx_queues) j = 0;
3639 reta = (reta << 8) | (j * 0x11);
3641 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3644 /* Now fill our hash function seeds */
3645 for (int i = 0; i < 10; i++)
3646 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
3648 /* Perform hash on these packet types */
3649 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
3650 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3652 mrqc |= IXGBE_MRQC_RSSEN
3653 | IXGBE_MRQC_RSS_FIELD_IPV4
3654 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3655 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
3656 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3657 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3658 | IXGBE_MRQC_RSS_FIELD_IPV6
3659 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3660 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
3661 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3662 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3664 /* RSS and RX IPP Checksum are mutually exclusive */
3665 rxcsum |= IXGBE_RXCSUM_PCSD;
3668 if (ifp->if_capenable & IFCAP_RXCSUM)
3669 rxcsum |= IXGBE_RXCSUM_PCSD;
3671 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3672 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3674 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3676 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
3677 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3678 rdrxctl |= IXGBE_RDRXCTL_AGGDIS;
3679 rdrxctl |= IXGBE_RDRXCTL_RSCLLIDIS;
3680 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3681 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3687 /*********************************************************************
3689 * Free all receive rings.
3691 **********************************************************************/
3693 ixgbe_free_receive_structures(struct adapter *adapter)
3695 struct rx_ring *rxr = adapter->rx_rings;
3697 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3698 struct lro_ctrl *lro = &rxr->lro;
3699 ixgbe_free_receive_buffers(rxr);
3700 /* Free LRO memory */
3702 /* Free the ring memory as well */
3703 ixgbe_dma_free(adapter, &rxr->rxdma);
3706 free(adapter->rx_rings, M_DEVBUF);
3709 /*********************************************************************
3711 * Free receive ring data structures
3713 **********************************************************************/
3715 ixgbe_free_receive_buffers(struct rx_ring *rxr)
3717 struct adapter *adapter = NULL;
3718 struct ixgbe_rx_buf *rxbuf = NULL;
3720 INIT_DEBUGOUT("free_receive_buffers: begin");
3721 adapter = rxr->adapter;
3722 if (rxr->rx_buffers != NULL) {
3723 rxbuf = &rxr->rx_buffers[0];
3724 for (int i = 0; i < adapter->num_rx_desc; i++) {
3725 if (rxbuf->map != NULL) {
3726 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3727 BUS_DMASYNC_POSTREAD);
3728 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3729 bus_dmamap_destroy(rxr->rxtag, rxbuf->map);
3731 if (rxbuf->m_head != NULL) {
3732 m_freem(rxbuf->m_head);
3734 rxbuf->m_head = NULL;
3738 if (rxr->rx_buffers != NULL) {
3739 free(rxr->rx_buffers, M_DEVBUF);
3740 rxr->rx_buffers = NULL;
3742 if (rxr->rxtag != NULL) {
3743 bus_dma_tag_destroy(rxr->rxtag);
3749 /*********************************************************************
3751 * This routine executes in interrupt context. It replenishes
3752 * the mbufs in the descriptor and sends data which has been
3753 * dma'ed into host memory to upper layer.
3755 * We loop at most count times if count is > 0, or until done if
3758 * Return TRUE for more work, FALSE for all clean.
3759 *********************************************************************/
3761 ixgbe_rxeof(struct rx_ring *rxr, int count)
3763 struct adapter *adapter = rxr->adapter;
3764 struct ifnet *ifp = adapter->ifp;
3765 struct lro_ctrl *lro = &rxr->lro;
3766 struct lro_entry *queued;
3769 union ixgbe_adv_rx_desc *cur;
3773 i = rxr->next_to_check;
3774 cur = &rxr->rx_base[i];
3775 staterr = cur->wb.upper.status_error;
3777 if (!(staterr & IXGBE_RXD_STAT_DD)) {
3778 IXGBE_RX_UNLOCK(rxr);
3783 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3784 BUS_DMASYNC_POSTREAD);
3786 while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
3787 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3788 struct mbuf *sendmp, *mh, *mp;
3789 u16 hlen, plen, hdr;
3790 u8 dopayload, accept_frame, eop;
3795 sendmp = mh = mp = NULL;
3797 /* Sync the buffers */
3798 bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[i].map,
3799 BUS_DMASYNC_POSTREAD);
3802 ** The way the hardware is configured to
3803 ** split, it will ONLY use the header buffer
3804 ** when header split is enabled, otherwise we
3805 ** get normal behavior, ie, both header and
3806 ** payload are DMA'd into the payload buffer.
3808 ** The fmp test is to catch the case where a
3809 ** packet spans multiple descriptors, in that
3810 ** case only the first header is valid.
3812 if ((ixgbe_rx_hdr_split) && (rxr->fmp == NULL)){
3814 wb.lower.lo_dword.hs_rss.hdr_info);
3815 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3816 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3817 if (hlen > IXGBE_RX_HDR)
3818 hlen = IXGBE_RX_HDR;
3819 plen = le16toh(cur->wb.upper.length);
3820 /* Handle the header mbuf */
3821 mh = rxr->rx_buffers[i].m_head;
3823 dopayload = IXGBE_CLEAN_HDR;
3825 ** Get the payload length, this
3826 ** could be zero if its a small
3830 mp = rxr->rx_buffers[i].m_pack;
3833 mp->m_flags &= ~M_PKTHDR;
3835 mh->m_flags |= M_PKTHDR;
3836 dopayload = IXGBE_CLEAN_ALL;
3837 rxr->rx_split_packets++;
3838 } else { /* small packets */
3839 mh->m_flags &= ~M_PKTHDR;
3844 ** Either no header split, or a
3845 ** secondary piece of a fragmented
3848 mh = rxr->rx_buffers[i].m_pack;
3849 mh->m_flags |= M_PKTHDR;
3850 mh->m_len = le16toh(cur->wb.upper.length);
3851 dopayload = IXGBE_CLEAN_PKT;
3854 if (staterr & IXGBE_RXD_STAT_EOP) {
3860 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
3864 if (ixgbe_get_buf(rxr, i, dopayload) != 0) {
3868 /* Initial frame - setup */
3869 if (rxr->fmp == NULL) {
3870 mh->m_flags |= M_PKTHDR;
3871 mh->m_pkthdr.len = mh->m_len;
3872 rxr->fmp = mh; /* Store the first mbuf */
3874 if (mp) { /* Add payload if split */
3875 mh->m_pkthdr.len += mp->m_len;
3876 rxr->lmp = mh->m_next;
3879 /* Chain mbuf's together */
3880 mh->m_flags &= ~M_PKTHDR;
3881 rxr->lmp->m_next = mh;
3882 rxr->lmp = rxr->lmp->m_next;
3883 rxr->fmp->m_pkthdr.len += mh->m_len;
3887 rxr->fmp->m_pkthdr.rcvif = ifp;
3890 /* capture data for AIM */
3891 rxr->bytes += rxr->fmp->m_pkthdr.len;
3892 rxr->rx_bytes += rxr->bytes;
3893 if (ifp->if_capenable & IFCAP_RXCSUM)
3894 ixgbe_rx_checksum(staterr, rxr->fmp);
3896 rxr->fmp->m_pkthdr.csum_flags = 0;
3897 if (staterr & IXGBE_RXD_STAT_VP) {
3898 #if __FreeBSD_version >= 700000
3899 rxr->fmp->m_pkthdr.ether_vtag =
3900 le16toh(cur->wb.upper.vlan);
3901 rxr->fmp->m_flags |= M_VLANTAG;
3903 VLAN_INPUT_TAG_NEW(ifp, rxr->fmp,
3904 (le16toh(cur->wb.upper.vlan) &
3905 IXGBE_RX_DESC_SPECIAL_VLAN_MASK));
3915 /* Reuse loaded DMA map and just update mbuf chain */
3917 mh = rxr->rx_buffers[i].m_head;
3921 mp = rxr->rx_buffers[i].m_pack;
3922 mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
3923 mp->m_data = mp->m_ext.ext_buf;
3925 if (adapter->max_frame_size <=
3926 (MCLBYTES - ETHER_ALIGN))
3927 m_adj(mp, ETHER_ALIGN);
3928 if (rxr->fmp != NULL) {
3929 /* handles the whole chain */
3936 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3937 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3939 rxr->last_cleaned = i; /* for updating tail */
3941 if (++i == adapter->num_rx_desc)
3945 ** Now send up to the stack,
3946 ** note the the value of next_to_check
3947 ** is safe because we keep the RX lock
3950 if (sendmp != NULL) {
3951 /* Use LRO if possible */
3952 if ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0)))
3953 (*ifp->if_input)(ifp, sendmp);
3956 /* Get next descriptor */
3957 cur = &rxr->rx_base[i];
3958 staterr = cur->wb.upper.status_error;
3960 rxr->next_to_check = i;
3962 /* Advance the IXGB's Receive Queue "Tail Pointer" */
3963 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned);
3966 * Flush any outstanding LRO work
3968 while (!SLIST_EMPTY(&lro->lro_active)) {
3969 queued = SLIST_FIRST(&lro->lro_active);
3970 SLIST_REMOVE_HEAD(&lro->lro_active, next);
3971 tcp_lro_flush(lro, queued);
3974 IXGBE_RX_UNLOCK(rxr);
3977 ** Leaving with more to clean?
3978 ** then schedule another interrupt.
3980 if (staterr & IXGBE_RXD_STAT_DD) {
3981 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, rxr->eims);
3988 /*********************************************************************
3990 * Verify that the hardware indicated that the checksum is valid.
3991 * Inform the stack about the status of checksum so that stack
3992 * doesn't spend time verifying the checksum.
3994 *********************************************************************/
3996 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp)
3998 u16 status = (u16) staterr;
3999 u8 errors = (u8) (staterr >> 24);
4001 if (status & IXGBE_RXD_STAT_IPCS) {
4003 if (!(errors & IXGBE_RXD_ERR_IPE)) {
4004 /* IP Checksum Good */
4005 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4006 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4009 mp->m_pkthdr.csum_flags = 0;
4011 if (status & IXGBE_RXD_STAT_L4CS) {
4013 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
4014 mp->m_pkthdr.csum_flags |=
4015 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4016 mp->m_pkthdr.csum_data = htons(0xffff);
4023 #ifdef IXGBE_HW_VLAN_SUPPORT
4025 * This routine is run via an vlan
4029 ixgbe_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
4031 struct adapter *adapter = ifp->if_softc;
4032 u32 ctrl, rctl, index, vfta;
4034 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
4035 ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
4036 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
4037 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
4039 /* Make entry in the hardware filter table */
4040 ixgbe_set_vfta(&adapter->hw, vtag, 0, TRUE);
4044 * This routine is run via an vlan
4048 ixgbe_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
4050 struct adapter *adapter = ifp->if_softc;
4053 /* Remove entry in the hardware filter table */
4054 ixgbe_set_vfta(&adapter->hw, vtag, 0, FALSE);
4056 /* Have all vlans unregistered? */
4057 if (adapter->ifp->if_vlantrunk == NULL) {
4059 /* Turn off the filter table */
4060 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
4061 ctrl &= ~IXGBE_VLNCTRL_VME;
4062 ctrl &= ~IXGBE_VLNCTRL_VFE;
4063 ctrl |= IXGBE_VLNCTRL_CFIEN;
4064 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
4070 ixgbe_enable_intr(struct adapter *adapter)
4072 struct ixgbe_hw *hw = &adapter->hw;
4073 u32 mask = IXGBE_EIMS_ENABLE_MASK;
4076 /* Enable Fan Failure detection */
4077 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4078 mask |= IXGBE_EIMS_GPI_SDP1;
4080 /* 82599 specific interrupts */
4081 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4082 mask |= IXGBE_EIMS_ECC;
4083 mask |= IXGBE_EIMS_GPI_SDP1;
4084 mask |= IXGBE_EIMS_GPI_SDP2;
4087 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4089 /* With RSS we use auto clear */
4090 if (adapter->msix_mem) {
4091 mask = IXGBE_EIMS_ENABLE_MASK;
4092 /* Dont autoclear Link */
4093 mask &= ~IXGBE_EIMS_OTHER;
4094 mask &= ~IXGBE_EIMS_LSC;
4095 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4098 IXGBE_WRITE_FLUSH(hw);
4104 ixgbe_disable_intr(struct adapter *adapter)
4106 if (adapter->msix_mem)
4107 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4108 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
4109 IXGBE_WRITE_FLUSH(&adapter->hw);
4114 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
4118 value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
4125 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
4127 pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
4134 ** Setup the correct IVAR register for a particular MSIX interrupt
4135 ** (yes this is all very magic and confusing :)
4136 ** - entry is the register array entry
4137 ** - vector is the MSIX vector for this queue
4138 ** - type is RX/TX/MISC
4141 ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector, s8 type)
4143 struct ixgbe_hw *hw = &adapter->hw;
4146 vector |= IXGBE_IVAR_ALLOC_VAL;
4148 switch (hw->mac.type) {
4150 case ixgbe_mac_82598EB:
4152 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4154 entry += (type * 64);
4155 index = (entry >> 2) & 0x1F;
4156 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4157 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4158 ivar |= (vector << (8 * (entry & 0x3)));
4159 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4162 case ixgbe_mac_82599EB:
4163 if (type == -1) { /* MISC IVAR */
4164 index = (entry & 1) * 8;
4165 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4166 ivar &= ~(0xFF << index);
4167 ivar |= (vector << index);
4168 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4169 } else { /* RX/TX IVARS */
4170 index = (16 * (entry & 1)) + (8 * type);
4171 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4172 ivar &= ~(0xFF << index);
4173 ivar |= (vector << index);
4174 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4183 ixgbe_configure_ivars(struct adapter *adapter)
4185 struct tx_ring *txr = adapter->tx_rings;
4186 struct rx_ring *rxr = adapter->rx_rings;
4188 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++)
4189 ixgbe_set_ivar(adapter, i, rxr->msix, 0);
4191 for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
4192 ixgbe_set_ivar(adapter, i, txr->msix, 1);
4194 /* For the Link interrupt */
4195 ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
4199 ** ixgbe_sfp_probe - called in the local timer to
4200 ** determine if a port had optics inserted.
4202 static bool ixgbe_sfp_probe(struct adapter *adapter)
4204 struct ixgbe_hw *hw = &adapter->hw;
4205 device_t dev = adapter->dev;
4206 bool result = FALSE;
4208 if ((hw->phy.type == ixgbe_phy_nl) &&
4209 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4210 s32 ret = hw->phy.ops.identify_sfp(hw);
4213 ret = hw->phy.ops.reset(hw);
4214 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4215 device_printf(dev,"Unsupported SFP+ module detected!");
4216 printf(" Reload driver with supported module.\n");
4217 adapter->sfp_probe = FALSE;
4220 device_printf(dev,"SFP+ module detected!\n");
4221 /* We now have supported optics */
4222 adapter->sfp_probe = FALSE;
4230 ** Tasklet handler for MSIX Link interrupts
4231 ** - do outside interrupt since it might sleep
4234 ixgbe_handle_link(void *context, int pending)
4236 struct adapter *adapter = context;
4238 ixgbe_check_link(&adapter->hw,
4239 &adapter->link_speed, &adapter->link_up, 0);
4240 ixgbe_update_link_status(adapter);
4244 ** Tasklet for handling SFP module interrupts
4247 ixgbe_handle_mod(void *context, int pending)
4249 struct adapter *adapter = context;
4250 struct ixgbe_hw *hw = &adapter->hw;
4251 device_t dev = adapter->dev;
4254 err = hw->phy.ops.identify_sfp(hw);
4255 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4257 "Unsupported SFP+ module type was detected.\n");
4260 hw->mac.ops.setup_sfp(hw);
4261 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
4267 ** Tasklet for handling MSF (multispeed fiber) interrupts
4270 ixgbe_handle_msf(void *context, int pending)
4272 struct adapter *adapter = context;
4273 struct ixgbe_hw *hw = &adapter->hw;
4276 if (hw->mac.ops.get_link_capabilities)
4277 hw->mac.ops.get_link_capabilities(hw, &autoneg,
4279 if (hw->mac.ops.setup_link_speed)
4280 hw->mac.ops.setup_link_speed(hw, autoneg, TRUE, TRUE);
4281 ixgbe_check_link(&adapter->hw,
4282 &adapter->link_speed, &adapter->link_up, 0);
4283 ixgbe_update_link_status(adapter);
4287 /**********************************************************************
4289 * Update the board statistics counters.
4291 **********************************************************************/
4293 ixgbe_update_stats_counters(struct adapter *adapter)
4295 struct ifnet *ifp = adapter->ifp;;
4296 struct ixgbe_hw *hw = &adapter->hw;
4297 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4299 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4301 for (int i = 0; i < 8; i++) {
4303 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
4305 adapter->stats.mpc[i] += mp;
4306 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
4309 /* Hardware workaround, gprc counts missed packets */
4310 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4311 adapter->stats.gprc -= missed_rx;
4313 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4314 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4315 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4318 * Workaround: mprc hardware is incorrectly counting
4319 * broadcasts, so for now we subtract those.
4321 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4322 adapter->stats.bprc += bprc;
4323 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4324 adapter->stats.mprc -= bprc;
4326 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4327 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4328 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4329 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4330 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4331 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4332 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4333 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4335 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4336 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4338 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4339 adapter->stats.lxontxc += lxon;
4340 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4341 adapter->stats.lxofftxc += lxoff;
4342 total = lxon + lxoff;
4344 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4345 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4346 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4347 adapter->stats.gptc -= total;
4348 adapter->stats.mptc -= total;
4349 adapter->stats.ptc64 -= total;
4350 adapter->stats.gotc -= total * ETHER_MIN_LEN;
4352 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4353 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4354 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4355 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4356 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4357 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4358 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4359 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4360 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4361 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4364 /* Fill out the OS statistics structure */
4365 ifp->if_ipackets = adapter->stats.gprc;
4366 ifp->if_opackets = adapter->stats.gptc;
4367 ifp->if_ibytes = adapter->stats.gorc;
4368 ifp->if_obytes = adapter->stats.gotc;
4369 ifp->if_imcasts = adapter->stats.mprc;
4370 ifp->if_collisions = 0;
4373 ifp->if_ierrors = missed_rx + adapter->stats.crcerrs +
4374 adapter->stats.rlec;
4378 /**********************************************************************
4380 * This routine is called only when ixgbe_display_debug_stats is enabled.
4381 * This routine provides a way to take a look at important statistics
4382 * maintained by the driver and hardware.
4384 **********************************************************************/
4386 ixgbe_print_hw_stats(struct adapter * adapter)
4388 device_t dev = adapter->dev;
4391 device_printf(dev,"Std Mbuf Failed = %lu\n",
4392 adapter->mbuf_defrag_failed);
4393 device_printf(dev,"Missed Packets = %llu\n",
4394 (long long)adapter->stats.mpc[0]);
4395 device_printf(dev,"Receive length errors = %llu\n",
4396 ((long long)adapter->stats.roc +
4397 (long long)adapter->stats.ruc));
4398 device_printf(dev,"Crc errors = %llu\n",
4399 (long long)adapter->stats.crcerrs);
4400 device_printf(dev,"Driver dropped packets = %lu\n",
4401 adapter->dropped_pkts);
4402 device_printf(dev, "watchdog timeouts = %ld\n",
4403 adapter->watchdog_events);
4405 device_printf(dev,"XON Rcvd = %llu\n",
4406 (long long)adapter->stats.lxonrxc);
4407 device_printf(dev,"XON Xmtd = %llu\n",
4408 (long long)adapter->stats.lxontxc);
4409 device_printf(dev,"XOFF Rcvd = %llu\n",
4410 (long long)adapter->stats.lxoffrxc);
4411 device_printf(dev,"XOFF Xmtd = %llu\n",
4412 (long long)adapter->stats.lxofftxc);
4414 device_printf(dev,"Total Packets Rcvd = %llu\n",
4415 (long long)adapter->stats.tpr);
4416 device_printf(dev,"Good Packets Rcvd = %llu\n",
4417 (long long)adapter->stats.gprc);
4418 device_printf(dev,"Good Packets Xmtd = %llu\n",
4419 (long long)adapter->stats.gptc);
4420 device_printf(dev,"TSO Transmissions = %lu\n",
4426 /**********************************************************************
4428 * This routine is called only when em_display_debug_stats is enabled.
4429 * This routine provides a way to take a look at important statistics
4430 * maintained by the driver and hardware.
4432 **********************************************************************/
4434 ixgbe_print_debug_info(struct adapter *adapter)
4436 device_t dev = adapter->dev;
4437 struct rx_ring *rxr = adapter->rx_rings;
4438 struct tx_ring *txr = adapter->tx_rings;
4439 struct ixgbe_hw *hw = &adapter->hw;
4441 device_printf(dev,"Error Byte Count = %u \n",
4442 IXGBE_READ_REG(hw, IXGBE_ERRBC));
4444 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
4445 struct lro_ctrl *lro = &rxr->lro;
4446 device_printf(dev,"Queue[%d]: rdh = %d, hw rdt = %d\n",
4447 i, IXGBE_READ_REG(hw, IXGBE_RDH(i)),
4448 IXGBE_READ_REG(hw, IXGBE_RDT(i)));
4449 device_printf(dev,"RX(%d) Packets Received: %lld\n",
4450 rxr->me, (long long)rxr->rx_packets);
4451 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
4452 rxr->me, (long long)rxr->rx_split_packets);
4453 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
4454 rxr->me, (long)rxr->rx_bytes);
4455 device_printf(dev,"RX(%d) IRQ Handled: %lu\n",
4456 rxr->me, (long)rxr->rx_irq);
4457 device_printf(dev,"RX(%d) LRO Queued= %d\n",
4458 rxr->me, lro->lro_queued);
4459 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
4460 rxr->me, lro->lro_flushed);
4463 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
4464 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
4465 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
4466 IXGBE_READ_REG(hw, IXGBE_TDT(i)));
4467 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
4468 txr->me, (long)txr->total_packets);
4469 device_printf(dev,"TX(%d) IRQ Handled: %lu\n",
4470 txr->me, (long)txr->tx_irq);
4471 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
4472 txr->me, (long)txr->no_tx_desc_avail);
4475 device_printf(dev,"Link IRQ Handled: %lu\n",
4476 (long)adapter->link_irq);
4481 ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS)
4485 struct adapter *adapter;
4488 error = sysctl_handle_int(oidp, &result, 0, req);
4490 if (error || !req->newptr)
4494 adapter = (struct adapter *) arg1;
4495 ixgbe_print_hw_stats(adapter);
4501 ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS)
4504 struct adapter *adapter;
4507 error = sysctl_handle_int(oidp, &result, 0, req);
4509 if (error || !req->newptr)
4513 adapter = (struct adapter *) arg1;
4514 ixgbe_print_debug_info(adapter);
4520 ** Set flow control using sysctl:
4521 ** Flow control values:
4528 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4531 struct adapter *adapter;
4533 error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);
4538 adapter = (struct adapter *) arg1;
4539 switch (ixgbe_flow_control) {
4540 case ixgbe_fc_rx_pause:
4541 case ixgbe_fc_tx_pause:
4543 adapter->hw.fc.requested_mode = ixgbe_flow_control;
4547 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4550 ixgbe_fc_enable(&adapter->hw, 0);
4555 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
4556 const char *description, int *limit, int value)
4559 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4560 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4561 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4564 #ifdef IXGBE_TIMESYNC
4566 * Initialize the Time Sync Feature
4569 ixgbe_tsync_init(struct adapter *adapter)
4571 device_t dev = adapter->dev;
4575 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TIMINCA, (1<<24) |
4576 20833/PICOSECS_PER_TICK);
4578 adapter->last_stamp = IXGBE_READ_REG(&adapter->hw, IXGBE_SYSTIML);
4579 adapter->last_stamp |= (u64)IXGBE_READ_REG(&adapter->hw,
4580 IXGBE_SYSTIMH) << 32ULL;
4582 /* Enable the TX side */
4583 tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
4585 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSYNCTXCTL, tx_ctl);
4586 IXGBE_WRITE_FLUSH(&adapter->hw);
4588 tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
4589 if ((tx_ctl & 0x10) == 0) {
4590 device_printf(dev, "Failed to enable TX timestamping\n");
4595 rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
4596 rx_ctl |= 0x10; /* Enable the feature */
4597 rx_ctl |= 0x04; /* This value turns on Ver 1 and 2 */
4598 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSYNCRXCTL, rx_ctl);
4601 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7 (Ethertype)
4602 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable filter)
4603 * Ethertype Filter Queue Filter[0][31] = 0x1 (Enable Timestamping)
4605 IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQF(0), 0xC00088f7);
4607 IXGBE_WRITE_FLUSH(&adapter->hw);
4609 rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
4610 if ((rx_ctl & 0x10) == 0) {
4611 device_printf(dev, "Failed to enable RX timestamping\n");
4615 device_printf(dev, "IEEE 1588 Precision Time Protocol enabled\n");
4621 * Disable the Time Sync Feature
4624 ixgbe_tsync_disable(struct adapter *adapter)
4628 tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
4630 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSYNCTXCTL, tx_ctl);
4631 IXGBE_WRITE_FLUSH(&adapter->hw);
4633 /* Invalidate TX Timestamp */
4634 IXGBE_READ_REG(&adapter->hw, IXGBE_TXSTMPH);
4636 tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
4638 HW_DEBUGOUT("Failed to disable TX timestamping\n");
4640 rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
4643 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSYNCRXCTL, rx_ctl);
4644 IXGBE_WRITE_FLUSH(&adapter->hw);
4646 /* Invalidate RX Timestamp */
4647 IXGBE_READ_REG(&adapter->hw, IXGBE_RXSATRH);
4649 rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
4651 HW_DEBUGOUT("Failed to disable RX timestamping\n");
4656 #endif /* IXGBE_TIMESYNC */