1 /******************************************************************************
3 Copyright (c) 2001-2009, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
/* NOTE(review): the matching #endif is not visible in this extract */
41 /*********************************************************************
42 * Set this to one to display debug statistics
43 *********************************************************************/
/* Runtime toggle consumed by the debug/stats sysctl handlers below. */
44 int ixgbe_display_debug_stats = 0;
46 /*********************************************************************
48 *********************************************************************/
/* Driver version string; reported by ixgbe_probe() in the device description. */
49 char ixgbe_driver_version[] = "1.8.8";
51 /*********************************************************************
54 * Used by probe to select devices to load on
55 * Last field stores an index into ixgbe_strings
56 * Last entry must be all 0s
58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
59 *********************************************************************/
61 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
77 /* required last entry */
81 /*********************************************************************
82 * Table of branding strings
83 *********************************************************************/
/* Branding strings, indexed by the last field of ixgbe_vendor_info_array
 * and used by ixgbe_probe() to build the device description.
 * NOTE(review): the closing "};" is not visible in this extract. */
85 static char *ixgbe_strings[] = {
86 "Intel(R) PRO/10GbE PCI-Express Network Driver"
89 /*********************************************************************
91 *********************************************************************/
/* Newbus device interface entry points. */
92 static int ixgbe_probe(device_t);
93 static int ixgbe_attach(device_t);
94 static int ixgbe_detach(device_t);
95 static int ixgbe_shutdown(device_t);
/* Transmit start paths: legacy if_start and, on FreeBSD 8+, multiqueue. */
96 static void ixgbe_start(struct ifnet *);
97 static void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
98 #if __FreeBSD_version >= 800000
99 static int ixgbe_mq_start(struct ifnet *, struct mbuf *);
100 static int ixgbe_mq_start_locked(struct ifnet *,
101 struct tx_ring *, struct mbuf *);
102 static void ixgbe_qflush(struct ifnet *);
/* NOTE(review): the #endif matching the #if above is elided in this extract */
/* Interface control, init/stop, and media handling. */
104 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
105 static void ixgbe_watchdog(struct adapter *);
106 static void ixgbe_init(void *);
107 static void ixgbe_init_locked(struct adapter *);
108 static void ixgbe_stop(void *);
109 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
110 static int ixgbe_media_change(struct ifnet *);
/* Hardware identification and resource bring-up/teardown. */
111 static void ixgbe_identify_hardware(struct adapter *);
112 static int ixgbe_allocate_pci_resources(struct adapter *);
113 static int ixgbe_allocate_msix(struct adapter *);
114 static int ixgbe_allocate_legacy(struct adapter *);
115 static int ixgbe_allocate_queues(struct adapter *);
116 static int ixgbe_setup_msix(struct adapter *);
117 static void ixgbe_free_pci_resources(struct adapter *);
118 static void ixgbe_local_timer(void *);
119 static int ixgbe_hardware_init(struct adapter *);
120 static void ixgbe_setup_interface(device_t, struct adapter *);
/* Transmit ring setup and teardown. */
122 static int ixgbe_allocate_transmit_buffers(struct tx_ring *);
123 static int ixgbe_setup_transmit_structures(struct adapter *);
124 static void ixgbe_setup_transmit_ring(struct tx_ring *);
125 static void ixgbe_initialize_transmit_units(struct adapter *);
126 static void ixgbe_free_transmit_structures(struct adapter *);
127 static void ixgbe_free_transmit_buffers(struct tx_ring *);
/* Receive ring setup and teardown. */
129 static int ixgbe_allocate_receive_buffers(struct rx_ring *);
130 static int ixgbe_setup_receive_structures(struct adapter *);
131 static int ixgbe_setup_receive_ring(struct rx_ring *);
132 static void ixgbe_initialize_receive_units(struct adapter *);
133 static void ixgbe_free_receive_structures(struct adapter *);
134 static void ixgbe_free_receive_buffers(struct rx_ring *);
/* Interrupt moderation, statistics, and datapath helpers. */
136 static void ixgbe_init_moderation(struct adapter *);
137 static void ixgbe_enable_intr(struct adapter *);
138 static void ixgbe_disable_intr(struct adapter *);
139 static void ixgbe_update_stats_counters(struct adapter *);
140 static bool ixgbe_txeof(struct tx_ring *);
141 static bool ixgbe_rxeof(struct rx_ring *, int);
142 static void ixgbe_rx_checksum(u32, struct mbuf *);
143 static void ixgbe_set_promisc(struct adapter *);
144 static void ixgbe_disable_promisc(struct adapter *);
145 static void ixgbe_set_multi(struct adapter *);
146 static void ixgbe_print_hw_stats(struct adapter *);
147 static void ixgbe_print_debug_info(struct adapter *);
148 static void ixgbe_update_link_status(struct adapter *);
149 static int ixgbe_get_buf(struct rx_ring *, int, u8);
150 static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
/* Sysctl handlers and small utilities. */
151 static int ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS);
152 static int ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS);
153 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
154 static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
155 struct ixgbe_dma_alloc *, int);
156 static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
157 static void ixgbe_add_rx_process_limit(struct adapter *, const char *,
158 const char *, int *, int);
159 static bool ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
160 static bool ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
161 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
162 static void ixgbe_configure_ivars(struct adapter *);
163 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* 802.1Q VLAN hardware filter support. */
165 static void ixgbe_setup_vlan_hw_support(struct adapter *);
166 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
167 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
/* Adaptive interrupt moderation update, per RX ring. */
169 static void ixgbe_update_aim(struct rx_ring *);
171 /* Support for pluggable optic modules */
172 static bool ixgbe_sfp_probe(struct adapter *);
174 /* Legacy (single vector) interrupt handler */
175 static void ixgbe_legacy_irq(void *);
177 /* The MSI/X Interrupt handlers */
178 static void ixgbe_msix_tx(void *);
179 static void ixgbe_msix_rx(void *);
180 static void ixgbe_msix_link(void *);
182 /* Deferred interrupt tasklets */
183 static void ixgbe_handle_tx(void *, int);
184 static void ixgbe_handle_rx(void *, int);
185 static void ixgbe_handle_link(void *, int);
186 static void ixgbe_handle_msf(void *, int);
187 static void ixgbe_handle_mod(void *, int);
190 /*********************************************************************
191 * FreeBSD Device Interface Entry Points
192 *********************************************************************/
/* Newbus method table; the driver attaches as "ix" units on the pci bus.
 * NOTE(review): the DEVMETHOD terminator and closing "};" for both the
 * method table and driver_t are elided in this extract. */
194 static device_method_t ixgbe_methods[] = {
195 /* Device interface */
196 DEVMETHOD(device_probe, ixgbe_probe),
197 DEVMETHOD(device_attach, ixgbe_attach),
198 DEVMETHOD(device_detach, ixgbe_detach),
199 DEVMETHOD(device_shutdown, ixgbe_shutdown),
203 static driver_t ixgbe_driver = {
204 "ix", ixgbe_methods, sizeof(struct adapter),
207 static devclass_t ixgbe_devclass;
208 DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
/* Module load-order dependencies on the pci bus and ether framework. */
210 MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
211 MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
214 ** TUNEABLE PARAMETERS:
218 ** These parameters are used in Adaptive
219 ** Interrupt Moderation. The value is set
220 ** into EITR and controls the interrupt
221 ** frequency. They can be modified but
222 ** be careful in tuning them.
224 static int ixgbe_enable_aim = TRUE;
225 TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
226 static int ixgbe_low_latency = IXGBE_LOW_LATENCY;
227 TUNABLE_INT("hw.ixgbe.low_latency", &ixgbe_low_latency);
228 static int ixgbe_ave_latency = IXGBE_AVE_LATENCY;
229 TUNABLE_INT("hw.ixgbe.ave_latency", &ixgbe_ave_latency);
230 static int ixgbe_bulk_latency = IXGBE_BULK_LATENCY;
231 TUNABLE_INT("hw.ixgbe.bulk_latency", &ixgbe_bulk_latency);
233 /* How many packets rxeof tries to clean at a time */
234 static int ixgbe_rx_process_limit = 100;
235 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
237 /* Flow control setting, default to full */
238 static int ixgbe_flow_control = ixgbe_fc_full;
239 TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
242 * MSIX should be the default for best performance,
243 * but this allows it to be forced off for testing.
245 static int ixgbe_enable_msix = 1;
246 TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
249 * Number of Queues, should normally
250 * be left at 0, it then autoconfigures to
251 * the number of cpus. Each queue is a pair
252 * of RX and TX rings with a dedicated interrupt
254 static int ixgbe_num_queues = 0;
255 TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
257 /* Number of TX descriptors per ring */
258 static int ixgbe_txd = DEFAULT_TXD;
259 TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
261 /* Number of RX descriptors per ring */
262 static int ixgbe_rxd = DEFAULT_RXD;
263 TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
265 /* Total number of Interfaces - need for config sanity check */
266 static int ixgbe_total_ports;
269 ** Shadow VFTA table, this is needed because
270 ** the real filter table gets cleared during
271 ** a soft reset and we need to repopulate it.
273 static u32 ixgbe_shadow_vfta[IXGBE_VFTA_SIZE];
276 ** The number of scatter-gather segments
277 ** differs for 82598 and 82599, default to
/* Overridden to IXGBE_82599_SCATTER in ixgbe_attach() for 82599 devices. */
280 static int ixgbe_num_segs = IXGBE_82598_SCATTER;
282 /*********************************************************************
283 * Device identification routine
285 * ixgbe_probe determines if the driver should be loaded on
286 * adapter based on PCI vendor/device id of the adapter.
288 * return 0 on success, positive on failure
289 *********************************************************************/
/* NOTE(review): this extract elides some original lines (storage class,
 * braces, returns, ent++); comments annotate only the visible code. */
292 ixgbe_probe(device_t dev)
294 ixgbe_vendor_info_t *ent;
296 u16 pci_vendor_id = 0;
297 u16 pci_device_id = 0;
298 u16 pci_subvendor_id = 0;
299 u16 pci_subdevice_id = 0;
300 char adapter_name[256];
302 INIT_DEBUGOUT("ixgbe_probe: begin");
/* Reject anything that is not an Intel device up front. */
304 pci_vendor_id = pci_get_vendor(dev);
305 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
308 pci_device_id = pci_get_device(dev);
309 pci_subvendor_id = pci_get_subvendor(dev);
310 pci_subdevice_id = pci_get_subdevice(dev);
/* Scan the supported-device table; subvendor/subdevice of 0 act as
 * wildcards. The table's all-zero terminator stops the loop. */
312 ent = ixgbe_vendor_info_array;
313 while (ent->vendor_id != 0) {
314 if ((pci_vendor_id == ent->vendor_id) &&
315 (pci_device_id == ent->device_id) &&
317 ((pci_subvendor_id == ent->subvendor_id) ||
318 (ent->subvendor_id == 0)) &&
320 ((pci_subdevice_id == ent->subdevice_id) ||
321 (ent->subdevice_id == 0))) {
/* NOTE(review): sprintf into adapter_name[256] is unbounded;
 * snprintf would be safer, though the current branding strings
 * are well under the limit. */
322 sprintf(adapter_name, "%s, Version - %s",
323 ixgbe_strings[ent->index],
324 ixgbe_driver_version);
325 device_set_desc_copy(dev, adapter_name);
334 /*********************************************************************
335 * Device initialization routine
337 * The attach entry point is called when the driver is being loaded.
338 * This routine identifies the type of hardware, allocates all resources
339 * and initializes the hardware.
341 * return 0 on success, positive on failure
342 *********************************************************************/
/* NOTE(review): this extract elides several original lines (storage class,
 * braces, local declarations such as hw/error/ctrl_ext/pci_device_id,
 * break statements, error-path goto labels, and returns); comments
 * annotate only the visible code. */
345 ixgbe_attach(device_t dev)
347 struct adapter *adapter;
353 INIT_DEBUGOUT("ixgbe_attach: begin");
355 /* Allocate, clear, and link in our adapter structure */
356 adapter = device_get_softc(dev);
357 adapter->dev = adapter->osdep.dev = dev;
361 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
363 /* Keep track of optics */
/* Map device id to a default media type; 82599 parts also raise the
 * scatter-gather segment count. */
364 pci_device_id = pci_get_device(dev);
365 switch (pci_device_id) {
366 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT :
367 case IXGBE_DEV_ID_82598EB_CX4 :
368 adapter->optics = IFM_10G_CX4;
370 case IXGBE_DEV_ID_82598AF_DUAL_PORT :
371 case IXGBE_DEV_ID_82598_DA_DUAL_PORT :
372 case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
373 case IXGBE_DEV_ID_82598AT :
374 adapter->optics = IFM_10G_SR;
376 case IXGBE_DEV_ID_82598EB_XF_LR :
377 adapter->optics = IFM_10G_LR;
379 case IXGBE_DEV_ID_82599_SFP :
380 adapter->optics = IFM_10G_SR;
381 ixgbe_num_segs = IXGBE_82599_SCATTER;
383 case IXGBE_DEV_ID_82599_KX4 :
384 adapter->optics = IFM_10G_CX4;
385 ixgbe_num_segs = IXGBE_82599_SCATTER;
387 case IXGBE_DEV_ID_82599_XAUI_LOM :
388 ixgbe_num_segs = IXGBE_82599_SCATTER;
/* Register per-device sysctl nodes: stats/debug dumps, flow control,
 * and the four interrupt-moderation tunables. */
394 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
395 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
396 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
397 adapter, 0, ixgbe_sysctl_stats, "I", "Statistics");
399 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
400 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
401 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
402 adapter, 0, ixgbe_sysctl_debug, "I", "Debug Info");
404 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
405 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
406 OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
407 adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
409 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
410 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
411 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
412 &ixgbe_enable_aim, 1, "Interrupt Moderation");
414 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
415 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
416 OID_AUTO, "low_latency", CTLTYPE_INT|CTLFLAG_RW,
417 &ixgbe_low_latency, 1, "Low Latency");
419 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
420 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
421 OID_AUTO, "ave_latency", CTLTYPE_INT|CTLFLAG_RW,
422 &ixgbe_ave_latency, 1, "Average Latency");
424 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
425 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
426 OID_AUTO, "bulk_latency", CTLTYPE_INT|CTLFLAG_RW,
427 &ixgbe_bulk_latency, 1, "Bulk Latency");
429 /* Set up the timer callout */
430 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
432 /* Determine hardware revision */
433 ixgbe_identify_hardware(adapter);
435 /* Do base PCI setup - map BAR0 */
436 if (ixgbe_allocate_pci_resources(adapter)) {
437 device_printf(dev, "Allocation of PCI resources failed\n");
442 /* Do descriptor calc and sanity checks */
/* TX descriptor count must keep the ring DBA_ALIGN-aligned and in range. */
443 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
444 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
445 device_printf(dev, "TXD config issue, using default!\n");
446 adapter->num_tx_desc = DEFAULT_TXD;
448 adapter->num_tx_desc = ixgbe_txd;
451 ** With many RX rings it is easy to exceed the
452 ** system mbuf allocation. Tuning nmbclusters
453 ** can alleviate this.
/* s = total RX mbufs needed across all rings and ports. */
455 if (nmbclusters > 0 ) {
457 /* Calculate the total RX mbuf needs */
458 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
459 if (s > nmbclusters) {
460 device_printf(dev, "RX Descriptors exceed "
461 "system mbuf max, using default instead!\n");
462 ixgbe_rxd = DEFAULT_RXD;
/* NOTE(review): BUG - this RX-side range check uses MIN_TXD/MAX_TXD;
 * it should use MIN_RXD/MAX_RXD. */
466 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
467 ixgbe_rxd < MIN_TXD || ixgbe_rxd > MAX_TXD) {
468 device_printf(dev, "RXD config issue, using default!\n");
469 adapter->num_rx_desc = DEFAULT_RXD;
471 adapter->num_rx_desc = ixgbe_rxd;
473 /* Allocate our TX/RX Queues */
474 if (ixgbe_allocate_queues(adapter)) {
479 /* Initialize the shared code */
/* NOTE(review): hw is used here but its assignment (presumably
 * hw = &adapter->hw) is elided in this extract - confirm upstream. */
480 error = ixgbe_init_shared_code(hw);
481 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
483 ** No optics in this port, set up
484 ** so the timer routine will probe
485 ** for later insertion.
487 adapter->sfp_probe = TRUE;
489 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
490 device_printf(dev,"Unsupported SFP+ module detected!\n");
494 device_printf(dev,"Unable to initialize the shared code\n");
499 /* Initialize the hardware */
500 if (ixgbe_hardware_init(adapter)) {
501 device_printf(dev,"Unable to initialize the hardware\n");
/* Prefer MSI-X interrupt vectors when available and not disabled. */
506 if ((adapter->msix > 1) && (ixgbe_enable_msix))
507 error = ixgbe_allocate_msix(adapter);
509 error = ixgbe_allocate_legacy(adapter);
513 /* Setup OS specific network interface */
514 ixgbe_setup_interface(dev, adapter);
516 #ifdef IXGBE_IEEE1588
518 ** Setup the timer: IEEE 1588 support
520 adapter->cycles.read = ixgbe_read_clock;
521 adapter->cycles.mask = (u64)-1;
522 adapter->cycles.mult = 1;
523 adapter->cycles.shift = IXGBE_TSYNC_SHIFT;
524 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TIMINCA, (1<<24) |
525 IXGBE_TSYNC_CYCLE_TIME * IXGBE_TSYNC_SHIFT;
526 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYSTIML, 0x00000000);
527 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYSTIMH, 0xFF800000);
529 // JFV - this is not complete yet
532 /* Sysctl for limiting the amount of work done in the taskqueue */
533 ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
534 "max number of rx packets to process", &adapter->rx_process_limit,
535 ixgbe_rx_process_limit);
537 /* Initialize statistics */
538 ixgbe_update_stats_counters(adapter);
540 /* Register for VLAN events */
541 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
542 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
543 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
544 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
546 /* let hardware know driver is loaded */
547 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
548 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
549 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
551 INIT_DEBUGOUT("ixgbe_attach: end");
/* Error-path cleanup; the goto labels that reach these lines are elided. */
554 ixgbe_free_transmit_structures(adapter);
555 ixgbe_free_receive_structures(adapter);
557 ixgbe_free_pci_resources(adapter);
562 /*********************************************************************
563 * Device removal routine
565 * The detach entry point is called when the driver is being removed.
566 * This routine stops the adapter and deallocates all the resources
567 * that were allocated for driver operation.
569 * return 0 on success, positive on failure
570 *********************************************************************/
/* NOTE(review): storage class, braces, local declarations (ctrl_ext) and
 * returns are elided in this extract. */
573 ixgbe_detach(device_t dev)
575 struct adapter *adapter = device_get_softc(dev);
576 struct tx_ring *txr = adapter->tx_rings;
577 struct rx_ring *rxr = adapter->rx_rings;
580 INIT_DEBUGOUT("ixgbe_detach: begin");
582 /* Make sure VLANS are not using driver */
583 if (adapter->ifp->if_vlantrunk != NULL) {
584 device_printf(dev,"Vlan in use, detach first\n");
/* Stop the adapter under the core lock; the call between LOCK and
 * UNLOCK is elided in this extract. */
588 IXGBE_CORE_LOCK(adapter);
590 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free the per-queue TX and RX taskqueues. */
592 for (int i = 0; i < adapter->num_queues; i++, txr++) {
594 taskqueue_drain(txr->tq, &txr->tx_task);
595 taskqueue_free(txr->tq);
599 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
601 taskqueue_drain(rxr->tq, &rxr->rx_task);
602 taskqueue_free(rxr->tq);
606 /* Drain the Link queue */
608 taskqueue_drain(adapter->tq, &adapter->link_task);
609 taskqueue_drain(adapter->tq, &adapter->mod_task);
610 taskqueue_drain(adapter->tq, &adapter->msf_task);
611 taskqueue_free(adapter->tq);
614 /* let hardware know driver is unloading */
615 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
616 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
617 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
619 /* Unregister VLAN events */
620 if (adapter->vlan_attach != NULL)
621 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
622 if (adapter->vlan_detach != NULL)
623 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Tear down the ifnet, timer, bus resources, and ring structures. */
625 ether_ifdetach(adapter->ifp);
626 callout_drain(&adapter->timer);
627 ixgbe_free_pci_resources(adapter);
628 bus_generic_detach(dev);
629 if_free(adapter->ifp);
631 ixgbe_free_transmit_structures(adapter);
632 ixgbe_free_receive_structures(adapter);
634 IXGBE_CORE_LOCK_DESTROY(adapter);
638 /*********************************************************************
640 * Shutdown entry point
642 **********************************************************************/
/* Quiesce the adapter at system shutdown; the stop call between LOCK
 * and UNLOCK (and the return) is elided in this extract. */
645 ixgbe_shutdown(device_t dev)
647 struct adapter *adapter = device_get_softc(dev);
648 IXGBE_CORE_LOCK(adapter);
650 IXGBE_CORE_UNLOCK(adapter);
655 /*********************************************************************
656 * Transmit entry point
658 * ixgbe_start is called by the stack to initiate a transmit.
659 * The driver will remain in this routine as long as there are
660 * packets to transmit and transmit resources are available.
661 * In case resources are not available stack is notified and
662 * the packet is requeued.
663 **********************************************************************/
/* Drain the interface send queue into the given TX ring; caller must
 * hold the TX lock. NOTE(review): braces, the m_head declaration, and
 * several statements are elided in this extract. */
666 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
669 struct adapter *adapter = txr->adapter;
671 IXGBE_TX_LOCK_ASSERT(txr);
/* Give up unless the interface is RUNNING, not OACTIVE, and link is up. */
673 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
676 if (!adapter->link_active)
679 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
681 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
/* On xmit failure, mark OACTIVE and push the mbuf back for later. */
685 if (ixgbe_xmit(txr, &m_head)) {
688 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
689 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
692 /* Send a copy of the frame to the BPF listener */
693 ETHER_BPF_MTAP(ifp, m_head);
695 /* Set timeout in case hardware has problems transmitting */
696 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
703 * Legacy TX start - called by the stack, this
704 * always uses the first tx ring, and should
705 * not be used with multiqueue tx enabled.
/* NOTE(review): the IXGBE_TX_LOCK acquisition paired with the UNLOCK
 * below, plus braces, are elided in this extract. */
708 ixgbe_start(struct ifnet *ifp)
710 struct adapter *adapter = ifp->if_softc;
711 struct tx_ring *txr = adapter->tx_rings;
713 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
715 ixgbe_start_locked(txr, ifp);
716 IXGBE_TX_UNLOCK(txr);
721 #if __FreeBSD_version >= 800000
723 ** Multiqueue Transmit driver
/* Entry point for if_transmit: pick a TX ring (by flow id when present)
 * and either transmit under the ring lock or enqueue to its buf_ring.
 * NOTE(review): braces, returns, and the declarations/initialization of
 * txr/i/err are elided in this extract. */
727 ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
729 struct adapter *adapter = ifp->if_softc;
733 /* Which queue to use */
734 if ((m->m_flags & M_FLOWID) != 0)
735 i = m->m_pkthdr.flowid % adapter->num_queues;
736 txr = &adapter->tx_rings[i];
738 if (IXGBE_TX_TRYLOCK(txr)) {
739 err = ixgbe_mq_start_locked(ifp, txr, m);
740 IXGBE_TX_UNLOCK(txr);
742 err = drbr_enqueue(ifp, txr->br, m);
/* Transmit with the ring lock held: fast-path a single mbuf when the
 * buf_ring is empty, otherwise enqueue and drain the ring. */
748 ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
750 struct adapter *adapter = txr->adapter;
754 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
755 err = drbr_enqueue(ifp, txr->br, m);
759 if (m == NULL) /* Called by tasklet */
762 /* If nothing queued go right to xmit */
763 if (drbr_empty(ifp, txr->br)) {
764 if (ixgbe_xmit(txr, &m)) {
765 if (m && (err = drbr_enqueue(ifp, txr->br, m)) != 0)
768 /* Success, update stats */
769 drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
770 /* Send a copy of the frame to the BPF listener */
771 ETHER_BPF_MTAP(ifp, m);
772 /* Set the watchdog */
773 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
776 } else if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
780 if (drbr_empty(ifp, txr->br))
783 /* Process the queue */
785 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
787 next = drbr_dequeue(ifp, txr->br);
790 if (ixgbe_xmit(txr, &next))
792 ETHER_BPF_MTAP(ifp, next);
793 /* Set the watchdog */
794 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
/* Throttle the stack when descriptors run low. */
797 if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD)
798 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
804 ** Flush all ring buffers
/* Free every mbuf queued on each ring's buf_ring; the per-ring LOCK
 * paired with the UNLOCK below is elided in this extract. */
807 ixgbe_qflush(struct ifnet *ifp)
809 struct adapter *adapter = ifp->if_softc;
810 struct tx_ring *txr = adapter->tx_rings;
813 for (int i = 0; i < adapter->num_queues; i++, txr++) {
815 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
817 IXGBE_TX_UNLOCK(txr);
821 #endif /* __FreeBSD_version >= 800000 */
823 /*********************************************************************
826 * ixgbe_ioctl is called when the user wants to configure the
829 * return 0 on success, positive on failure
830 **********************************************************************/
/* NOTE(review): the switch statement and its case labels (SIOCxIFADDR,
 * SIOCSIFMTU, SIOCSIFFLAGS, SIOC(ADD|DEL)MULTI, SIOCxIFMEDIA, SIOCSIFCAP,
 * SIOCSHWTSTAMP, default), break statements, and braces are elided in
 * this extract; the handler bodies below are annotated by the DEBUGOUT
 * string each one prints. */
833 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
835 struct adapter *adapter = ifp->if_softc;
836 struct ifreq *ifr = (struct ifreq *) data;
838 struct ifaddr *ifa = (struct ifaddr *) data;
/* Interface address: for AF_INET, bring the interface up without a full
 * re-init, then let arp_ifinit announce; otherwise fall to ether_ioctl. */
845 IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
846 if (ifa->ifa_addr->sa_family == AF_INET) {
847 ifp->if_flags |= IFF_UP;
848 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
849 IXGBE_CORE_LOCK(adapter);
850 ixgbe_init_locked(adapter);
851 IXGBE_CORE_UNLOCK(adapter);
853 arp_ifinit(ifp, ifa);
856 ether_ioctl(ifp, command, data);
/* MTU change: validate against the max frame size, then re-init. */
859 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
860 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
863 IXGBE_CORE_LOCK(adapter);
864 ifp->if_mtu = ifr->ifr_mtu;
865 adapter->max_frame_size =
866 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
867 ixgbe_init_locked(adapter);
868 IXGBE_CORE_UNLOCK(adapter);
/* Flag change: refresh promisc/allmulti filters without a re-init when
 * only those bits changed; cache the flags for the next comparison. */
872 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
873 IXGBE_CORE_LOCK(adapter);
874 if (ifp->if_flags & IFF_UP) {
875 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
876 if ((ifp->if_flags ^ adapter->if_flags) &
877 (IFF_PROMISC | IFF_ALLMULTI)) {
878 ixgbe_disable_promisc(adapter);
879 ixgbe_set_promisc(adapter);
882 ixgbe_init_locked(adapter);
884 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
886 adapter->if_flags = ifp->if_flags;
887 IXGBE_CORE_UNLOCK(adapter);
/* Multicast list change: reload the filter with interrupts masked. */
891 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
892 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
893 IXGBE_CORE_LOCK(adapter);
894 ixgbe_disable_intr(adapter);
895 ixgbe_set_multi(adapter);
896 ixgbe_enable_intr(adapter);
897 IXGBE_CORE_UNLOCK(adapter);
902 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
903 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* Capability change: toggle only the bits that differ from current. */
907 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
908 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
909 if (mask & IFCAP_HWCSUM)
910 ifp->if_capenable ^= IFCAP_HWCSUM;
911 if (mask & IFCAP_TSO4)
912 ifp->if_capenable ^= IFCAP_TSO4;
913 if (mask & IFCAP_LRO)
914 ifp->if_capenable ^= IFCAP_LRO;
915 if (mask & IFCAP_VLAN_HWTAGGING)
916 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
917 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
919 VLAN_CAPABILITIES(ifp);
923 #ifdef IXGBE_IEEE1588
925 ** IOCTL support for Precision Time (IEEE 1588) Support
928 error = ixgbe_hwtstamp_ioctl(adapter, ifp);
/* Anything else is delegated to the generic ethernet handler. */
933 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
934 error = ether_ioctl(ifp, command, data);
941 /*********************************************************************
942 * Watchdog entry point
944 * This routine is called by the local timer
945 * to detect hardware hangs .
947 **********************************************************************/
/* NOTE(review): braces, per-ring lock acquisitions, the head/tail local
 * declarations, and several control-flow lines are elided in this
 * extract. */
950 ixgbe_watchdog(struct adapter *adapter)
952 device_t dev = adapter->dev;
953 struct tx_ring *txr = adapter->tx_rings;
954 struct ixgbe_hw *hw = &adapter->hw;
955 bool tx_hang = FALSE;
957 IXGBE_CORE_LOCK_ASSERT(adapter);
960 * The timer is set to 5 every time ixgbe_start() queues a packet.
961 * Then ixgbe_txeof() keeps resetting to 5 as long as it cleans at
962 * least one descriptor.
963 * Finally, anytime all descriptors are clean the timer is
/* Scan each ring; a ring whose timer was armed and has now expired,
 * and whose hardware head != tail, is considered hung. */
966 for (int i = 0; i < adapter->num_queues; i++, txr++) {
970 if (txr->watchdog_timer == 0 || --txr->watchdog_timer) {
971 IXGBE_TX_UNLOCK(txr);
974 head = IXGBE_READ_REG(hw, IXGBE_TDH(i));
975 tail = IXGBE_READ_REG(hw, IXGBE_TDT(i));
976 if (head == tail) { /* last minute check */
977 IXGBE_TX_UNLOCK(txr);
980 /* Well, seems something is really hung */
982 IXGBE_TX_UNLOCK(txr);
986 if (tx_hang == FALSE)
990 * If we are in this routine because of pause frames, then don't
991 * reset the hardware.
/* TFCS.TXOFF set means TX is paused by flow control: rearm the
 * per-ring timers instead of resetting. */
993 if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
994 txr = adapter->tx_rings; /* reset pointer */
995 for (int i = 0; i < adapter->num_queues; i++, txr++) {
997 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
998 IXGBE_TX_UNLOCK(txr);
/* Genuine hang: dump per-queue state, mark not-RUNNING, and re-init. */
1004 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1005 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1006 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
1007 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
1008 IXGBE_READ_REG(hw, IXGBE_TDT(i)));
1009 device_printf(dev,"TX(%d) desc avail = %d,"
1010 "Next TX to Clean = %d\n",
1011 i, txr->tx_avail, txr->next_tx_to_clean);
1013 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1014 adapter->watchdog_events++;
1016 ixgbe_init_locked(adapter);
1019 /*********************************************************************
1022 * This routine is used in two ways. It is used by the stack as
1023 * init entry point in network interface structure. It is also used
1024 * by the driver as a hw/sw initialization routine to get to a
1027 * return 0 on success, positive on failure
1028 **********************************************************************/
1029 #define IXGBE_MHADD_MFS_SHIFT 16
/*
 * Bring the adapter from stopped to running with the core lock held:
 * program the MAC address, init hardware, set up TX/RX rings, multicast,
 * interrupt moderation and GPIE, enable queues and the RX engine, start
 * the watchdog callout, route interrupts, and finally mark the ifnet
 * RUNNING.  Caller must hold core_mtx (asserted below).
 *
 * NOTE(review): this excerpt is non-contiguous (original line numbers
 * skip); error-path returns, an `else`, and closing braces are elided
 * between several of the numbered statements below.
 */
1032 ixgbe_init_locked(struct adapter *adapter)
1034 	struct ifnet *ifp = adapter->ifp;
1035 	device_t dev = adapter->dev;
1036 	struct ixgbe_hw *hw;
1037 	u32 k, txdctl, mhadd, gpie;
1041 	INIT_DEBUGOUT("ixgbe_init: begin");
1044 	mtx_assert(&adapter->core_mtx, MA_OWNED);
	/* Quiesce the adapter before reprogramming it */
1046 	ixgbe_stop(adapter);
1048 	/* Get the latest mac address, User can use a LAA */
1049 	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1050 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
1051 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, 1);
1052 	adapter->hw.addr_ctrl.rar_used_count = 1;
1054 	/* Initialize the hardware */
1055 	if (ixgbe_hardware_init(adapter)) {
1056 		device_printf(dev, "Unable to initialize the hardware\n");
1060 	/* Prepare transmit descriptors and buffers */
1061 	if (ixgbe_setup_transmit_structures(adapter)) {
1062 		device_printf(dev,"Could not setup transmit structures\n");
1063 		ixgbe_stop(adapter);
1067 	ixgbe_initialize_transmit_units(adapter);
1069 	/* Setup Multicast table */
1070 	ixgbe_set_multi(adapter);
1073 	** Determine the correct mbuf pool
1074 	** for doing jumbo/headersplit
	/* Jumbo MTU needs page-size clusters; standard MTU uses 2K clusters */
1076 	if (ifp->if_mtu > ETHERMTU)
1077 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
1079 		adapter->rx_mbuf_sz = MCLBYTES;
1081 	/* Prepare receive descriptors and buffers */
1082 	if (ixgbe_setup_receive_structures(adapter)) {
1083 		device_printf(dev,"Could not setup receive structures\n");
1084 		ixgbe_stop(adapter);
1088 	/* Configure RX settings */
1089 	ixgbe_initialize_receive_units(adapter);
1091 	/* Configure Interrupt Moderation */
1092 	ixgbe_init_moderation(adapter);
1094 	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	/* 82599: enable SDP1/SDP2 general-purpose interrupts (SFP signals) */
1096 	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1097 		gpie |= IXGBE_SDP1_GPIEN;
1098 		gpie |= IXGBE_SDP2_GPIEN;
1101 	/* Enable Fan Failure Interrupt */
1102 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
1103 		gpie |= IXGBE_SDP1_GPIEN;
1105 	if (adapter->msix > 1) {
1106 		/* Enable Enhanced MSIX mode */
1107 		gpie |= IXGBE_GPIE_MSIX_MODE;
1108 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1111 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
1113 	/* Set the various hardware offload abilities */
1114 	ifp->if_hwassist = 0;
1115 	if (ifp->if_capenable & IFCAP_TSO4)
1116 		ifp->if_hwassist |= CSUM_TSO;
1117 	if (ifp->if_capenable & IFCAP_TXCSUM)
	/* NOTE(review): plain `=` here clobbers CSUM_TSO set just above in the
	 * visible lines — an elided line may OR these; confirm against full file */
1118 		ifp->if_hwassist = (CSUM_TCP | CSUM_UDP);
	/* Program max frame size for jumbo frames */
1121 	if (ifp->if_mtu > ETHERMTU) {
1122 		mhadd = IXGBE_READ_REG(&adapter->hw, IXGBE_MHADD);
1123 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
1124 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1125 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MHADD, mhadd);
1128 	/* Now enable all the queues */
1130 	for (int i = 0; i < adapter->num_queues; i++) {
1131 		txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
1132 		txdctl |= IXGBE_TXDCTL_ENABLE;
1133 		/* Set WTHRESH to 8, burst writeback */
1134 		txdctl |= (8 << 16);
1135 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
1138 	for (int i = 0; i < adapter->num_queues; i++) {
1139 		rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
1140 		/* PTHRESH set to 32 */
1142 		rxdctl |= IXGBE_RXDCTL_ENABLE;
1143 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
		/* Poll (up to 10 tries) until hardware acknowledges the enable */
1144 		for (k = 0; k < 10; k++) {
1145 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1146 			    IXGBE_RXDCTL_ENABLE)
		/* Hand all descriptors to hardware: tail = ring end */
1152 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1155 	/* Set up VLAN offloads and filter */
1156 	ixgbe_setup_vlan_hw_support(adapter);
1158 	/* Enable Receive engine */
1159 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1160 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1161 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
1162 	rxctrl |= IXGBE_RXCTRL_RXEN;
1163 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	/* Arm the 1-second stats/watchdog timer */
1165 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1167 	/* Set up MSI/X routing */
1168 	if (ixgbe_enable_msix)
1169 		ixgbe_configure_ivars(adapter);
1170 	else {	/* Simple settings for Legacy/MSI */
1171 		ixgbe_set_ivar(adapter, 0, 0, 0);
1172 		ixgbe_set_ivar(adapter, 0, 0, 1);
1175 	ixgbe_enable_intr(adapter);
1178 	** Check on any SFP devices that
1179 	** need to be kick-started
1181 	err = hw->phy.ops.identify(hw);
1182 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1184 		    "Unsupported SFP+ module type was detected.\n");
	/* Kick off module/multispeed-fiber setup tasks as appropriate */
1188 	if (ixgbe_is_sfp(hw)) {
1189 		if (hw->phy.multispeed_fiber) {
1190 			hw->mac.ops.setup_sfp(hw);
1191 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1193 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1195 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
1197 	/* Now inform the stack we're ready */
1198 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1199 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * Stack-facing init entry point: take the core lock and run the
 * locked initialization.  `arg` is the softc (struct adapter *).
 */
1205 ixgbe_init(void *arg)
1207 	struct adapter *adapter = arg;
1209 	IXGBE_CORE_LOCK(adapter);
1210 	ixgbe_init_locked(adapter);
1211 	IXGBE_CORE_UNLOCK(adapter);
1218 ** MSIX Interrupt Handlers and Tasklets
/*
 * Unmask the interrupt for a single queue vector.  The 82598 uses the
 * single 32-bit EIMS register; newer MACs split the 64-bit queue mask
 * across EIMS_EX(0)/EIMS_EX(1).
 *
 * NOTE(review): `(u64)(1 << vector)` shifts in 32-bit before widening;
 * vectors >= 31 would misbehave — presumably never reached here, confirm.
 */
1223 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1225 	struct ixgbe_hw *hw = &adapter->hw;
1226 	u64	queue = (u64)(1 << vector);
1229 	if (hw->mac.type == ixgbe_mac_82598EB) {
1230 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1231 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1233 		mask = (queue & 0xFFFFFFFF);
1235 		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1236 		mask = (queue >> 32);
1238 		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
/*
 * Mask (disable) the interrupt for a single queue vector — mirror image
 * of ixgbe_enable_queue() using the EIMC clear registers instead of EIMS.
 */
1243 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1245 	struct ixgbe_hw *hw = &adapter->hw;
1246 	u64	queue = (u64)(1 << vector);
1249 	if (hw->mac.type == ixgbe_mac_82598EB) {
1250 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1251 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1253 		mask = (queue & 0xFFFFFFFF);
1255 		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1256 		mask = (queue >> 32);
1258 		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * Software-trigger interrupts for the given set of RX queues by writing
 * the EICS (interrupt cause set) registers.  Used by the local timer to
 * kick queue processing (e.g. to catch missed interrupts).
 */
1263 ixgbe_rearm_rx_queues(struct adapter *adapter, u64 queues)
1267 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1268 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
1269 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1271 		mask = (queues & 0xFFFFFFFF);
1272 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
1273 		mask = (queues >> 32);
1274 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
/*
 * Taskqueue handler for deferred RX processing: drain the ring (up to
 * MAX_LOOP passes of ixgbe_rxeof with no packet limit), then re-enable
 * the queue's interrupt.  `pending` is the standard taskqueue argument.
 */
1279 ixgbe_handle_rx(void *context, int pending)
1281 	struct rx_ring	*rxr = context;
1282 	struct adapter  *adapter = rxr->adapter;
1283 	u32		loop = MAX_LOOP;
1287 		more = ixgbe_rxeof(rxr, -1);
1288 	} while (loop-- && more);
1289 	/* Reenable this interrupt */
1290 	ixgbe_enable_queue(adapter, rxr->msix);
/*
 * Taskqueue handler for deferred TX completion: reap finished
 * descriptors (bounded by MAX_LOOP), restart transmission if the
 * interface is running and packets are queued, then re-enable the
 * queue's interrupt.  The TX lock is taken on an elided line above the
 * reap loop and released at line 1317.
 */
1294 ixgbe_handle_tx(void *context, int pending)
1296 	struct tx_ring	*txr = context;
1297 	struct adapter  *adapter = txr->adapter;
1298 	struct ifnet	*ifp = adapter->ifp;
1299 	u32		loop = MAX_LOOP;
1304 		more = ixgbe_txeof(txr);
1305 	} while (loop-- && more);
1307 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
	/* FreeBSD 8+ uses the multiqueue buf_ring path; older uses if_snd */
1308 #if __FreeBSD_version >= 800000
1309 		if (!drbr_empty(ifp, txr->br))
1310 			ixgbe_mq_start_locked(ifp, txr, NULL);
1312 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1313 			ixgbe_start_locked(txr, ifp);
1317 	IXGBE_TX_UNLOCK(txr);
1318 	/* Reenable this interrupt */
1319 	ixgbe_enable_queue(adapter, txr->msix);
1323 /*********************************************************************
1325 * Legacy Interrupt Service routine
1327 **********************************************************************/
/*
 * Legacy/MSI (single-vector) interrupt handler: reading EICR both
 * returns and clears all pending causes.  Services RX and TX inline
 * (deferring overflow work to taskqueues), then checks fan failure and
 * link-state causes before re-enabling interrupts.
 */
1330 ixgbe_legacy_irq(void *arg)
1332 	struct adapter	*adapter = arg;
1333 	struct ixgbe_hw	*hw = &adapter->hw;
1334 	struct 		tx_ring *txr = adapter->tx_rings;
1335 	struct		rx_ring *rxr = adapter->rx_rings;
1337 	u32		reg_eicr, loop = MAX_LOOP;
1340 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	/* Spurious/shared interrupt: nothing pending, just re-enable */
1342 	if (reg_eicr == 0) {
1343 		ixgbe_enable_intr(adapter);
	/* If rxeof reports more work remains, push the rest to the taskqueue */
1347 	if (ixgbe_rxeof(rxr, adapter->rx_process_limit))
1348 		taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1353 		more = ixgbe_txeof(txr);
1354 	} while (loop-- && more);
1355 	IXGBE_TX_UNLOCK(txr);
1358 		taskqueue_enqueue(txr->tq, &txr->tx_task);
1360 	/* Check for fan failure */
1361 	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1362 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1363                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1364 		    "REPLACE IMMEDIATELY!!\n");
		/* NOTE(review): writes EIMS here while ixgbe_msix_link() writes
		 * EICR for the same cause — looks intentional (re-arm vs clear)
		 * but worth confirming against the datasheet */
1365 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
1368 	/* Link status change */
1369 	if (reg_eicr & IXGBE_EICR_LSC) {
1370 		ixgbe_check_link(&adapter->hw,
1371 		    &adapter->link_speed, &adapter->link_up, 0);
1372 		ixgbe_update_link_status(adapter);
1375 	/* Update interrupt rate */
1376 	if (ixgbe_enable_aim == TRUE)
1377 		ixgbe_update_aim(rxr);
1379 	ixgbe_enable_intr(adapter);
1384 /*********************************************************************
1386 * MSI TX Interrupt Service routine
1388 **********************************************************************/
/*
 * MSI-X TX interrupt handler for one ring: mask the vector, reap
 * completed descriptors once, then either defer remaining work to the
 * taskqueue or re-enable the vector.
 */
1390 ixgbe_msix_tx(void *arg)
1392 	struct tx_ring	*txr = arg;
1393 	struct adapter  *adapter = txr->adapter;
	/* Mask this vector until processing is done */
1396 	ixgbe_disable_queue(adapter, txr->msix);
1400 	more = ixgbe_txeof(txr);
1401 	IXGBE_TX_UNLOCK(txr);
1403 		taskqueue_enqueue(txr->tq, &txr->tx_task);
1404 	else /* Reenable this interrupt */
1405 		ixgbe_enable_queue(adapter, txr->msix);
1410 /*********************************************************************
1412 * MSIX RX Interrupt Service routine
1414 **********************************************************************/
/*
 * MSI-X RX interrupt handler for one ring: mask the vector, process up
 * to rx_process_limit packets, optionally adapt the EITR moderation
 * rate, then defer leftover work or re-enable the vector.
 */
1416 ixgbe_msix_rx(void *arg)
1418 	struct rx_ring	*rxr = arg;
1419 	struct adapter  *adapter = rxr->adapter;
	/* Mask this vector until processing is done */
1422 	ixgbe_disable_queue(adapter, rxr->msix);
1425 	more = ixgbe_rxeof(rxr, adapter->rx_process_limit);
1427 	/* Update interrupt rate */
1428 	if (ixgbe_enable_aim == TRUE)
1429 		ixgbe_update_aim(rxr);
1432 		taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1434 		ixgbe_enable_queue(adapter, rxr->msix);
/*
 * MSI-X "other"/link vector handler: reads the cause via EICS, clears
 * it, and dispatches link-change, ECC-error, SFP (SDP1/SDP2) and fan
 * failure events to the appropriate taskqueue tasks, then re-enables
 * the OTHER cause in EIMS.
 */
1440 ixgbe_msix_link(void *arg)
1442 	struct adapter	*adapter = arg;
1443 	struct ixgbe_hw *hw = &adapter->hw;
1446 	++adapter->link_irq;
1448 	/* First get the cause */
1449 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1450 	/* Clear interrupt with write */
1451 	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1453 	/* Link status change */
1454 	if (reg_eicr & IXGBE_EICR_LSC)
1455 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
	/* 82599-only causes: ECC and the SFP module/MSF GPI pins */
1457 	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1458 		if (reg_eicr & IXGBE_EICR_ECC) {
1459 			device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1460 			    "Please Reboot!!\n");
1461 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1463 		if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
1464                 	/* Clear the interrupt */
1465                 	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1466 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1467         	} else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
1468                 	/* Clear the interrupt */
1469                 	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1470 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1474 	/* Check for fan failure */
1475 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1476 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1477                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1478 		    "REPLACE IMMEDIATELY!!\n");
1479 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	/* Re-enable only the "other" cause; queue vectors manage themselves */
1482 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1487 ** Routine to do adjust the RX EITR value based on traffic,
1488 ** its a simple three state model, but seems to help.
1490 ** Note that the three EITR values are tuneable using
1491 ** sysctl in real time. The feature can be effectively
1492 ** nullified by setting them equal.
1494 #define BULK_THRESHOLD	10000
1495 #define AVE_THRESHOLD	1600
/*
 * Adaptive interrupt moderation for one RX ring: a three-state machine
 * (low -> ave -> bulk latency) driven by the byte count seen since the
 * last update.  Writes the new EITR value only on a state change; the
 * ring's byte counter is presumably reset on an elided line — confirm.
 */
1498 ixgbe_update_aim(struct rx_ring *rxr)
1500 	struct adapter *adapter = rxr->adapter;
1503 	/* Update interrupt moderation based on traffic */
1504 	olditr = rxr->eitr_setting;
1507 	/* Idle, don't change setting */
1508 	if (rxr->bytes == 0)
	/* Promote/demote between the three tunable latency settings */
1511 	if (olditr == ixgbe_low_latency) {
1512 		if (rxr->bytes > AVE_THRESHOLD)
1513 			newitr = ixgbe_ave_latency;
1514 	} else if (olditr == ixgbe_ave_latency) {
1515 		if (rxr->bytes < AVE_THRESHOLD)
1516 			newitr = ixgbe_low_latency;
1517 		else if (rxr->bytes > BULK_THRESHOLD)
1518 			newitr = ixgbe_bulk_latency;
1519 	} else if (olditr == ixgbe_bulk_latency) {
1520 		if (rxr->bytes < BULK_THRESHOLD)
1521 			newitr = ixgbe_ave_latency;
1524 	if (olditr != newitr) {
1525 		/* Change interrupt rate */
1526 		rxr->eitr_setting = newitr;
1527 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rxr->me),
1528 		    newitr | (newitr << 16));
/*
 * Program initial EITR interrupt-moderation values: a single fixed rate
 * for Legacy/MSI; per-vector rates for MSI-X (fixed for TX, adaptive
 * starting point for RX, and a dedicated rate for the link vector).
 */
1536 ixgbe_init_moderation(struct adapter *adapter)
1538 	struct rx_ring *rxr = adapter->rx_rings;
1539 	struct tx_ring *txr = adapter->tx_rings;
1541 	/* Single interrupt - MSI or Legacy? */
1542 	if (adapter->msix < 2) {
1543 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(0), 100);
1547 	/* TX irq moderation rate is fixed */
1548 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
1549 		IXGBE_WRITE_REG(&adapter->hw,
1550 		    IXGBE_EITR(txr->msix), ixgbe_ave_latency);
1551 		txr->watchdog_timer = FALSE;
1554 	/* RX moderation will be adapted over time, set default */
1555 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1556 		IXGBE_WRITE_REG(&adapter->hw,
1557 		    IXGBE_EITR(rxr->msix), ixgbe_low_latency);
1560 	/* Set Link moderation */
1561 	IXGBE_WRITE_REG(&adapter->hw,
1562 	    IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
1566 /*********************************************************************
1568 * Media Ioctl callback
1570 * This routine is called whenever the user queries the status of
1571 * the interface using ifconfig.
1573 **********************************************************************/
/*
 * ifmedia status callback (ifconfig query): refresh link state under
 * the core lock and report active media/duplex to the stack.
 */
1575 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1577 	struct adapter *adapter = ifp->if_softc;
1579 	INIT_DEBUGOUT("ixgbe_media_status: begin");
1580 	IXGBE_CORE_LOCK(adapter);
1581 	ixgbe_update_link_status(adapter);
1583 	ifmr->ifm_status = IFM_AVALID;
1584 	ifmr->ifm_active = IFM_ETHER;
	/* No link: report media-valid only and return early */
1586 	if (!adapter->link_active) {
1587 		IXGBE_CORE_UNLOCK(adapter);
1591 	ifmr->ifm_status |= IFM_ACTIVE;
1593 	switch (adapter->link_speed) {
1594 		case IXGBE_LINK_SPEED_1GB_FULL:
1595 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1597 		case IXGBE_LINK_SPEED_10GB_FULL:
			/* 10G media type depends on the detected optics */
1598 			ifmr->ifm_active |= adapter->optics | IFM_FDX;
1602 	IXGBE_CORE_UNLOCK(adapter);
1607 /*********************************************************************
1609 * Media Ioctl callback
1611 * This routine is called when the user changes speed/duplex using
1612  *  media/mediaopt option with ifconfig.
1614 **********************************************************************/
/*
 * ifmedia change callback: only autoselect is supported — any other
 * media subtype is rejected with a console message.
 */
1616 ixgbe_media_change(struct ifnet * ifp)
1618 	struct adapter *adapter = ifp->if_softc;
1619 	struct ifmedia  *ifm = &adapter->media;
1621 	INIT_DEBUGOUT("ixgbe_media_change: begin");
1623 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1626 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		/* Autoselect: advertise both 1G and 10G full duplex */
1628 		adapter->hw.mac.autoneg = TRUE;
1629 		adapter->hw.phy.autoneg_advertised =
1630 		    IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
1633 		device_printf(adapter->dev, "Only auto media type\n");
1640 /*********************************************************************
1642 * This routine maps the mbufs to tx descriptors.
1643 * WARNING: while this code is using an MQ style infrastructure,
1644 * it would NOT work as is with more than 1 queue.
1646 * return 0 on success, positive on failure
1647 **********************************************************************/
/*
 * Map one mbuf chain onto advanced TX descriptors and hand it to
 * hardware.  Handles EFBIG by defragmenting once, sets up TSO/checksum
 * offload context descriptors as needed, records the EOP index on the
 * first buffer so txeof can find completions, and bumps the tail (TDT).
 * May replace *m_headp (defrag).  Caller holds the TX lock.
 *
 * Returns 0 on success, positive errno on failure (per the header
 * comment above this function).
 */
1650 ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1652 	struct adapter  *adapter = txr->adapter;
1653 	u32		olinfo_status = 0, cmd_type_len;
1655 	int             i, j, error, nsegs;
1656 	int		first, last = 0;
1657 	struct mbuf	*m_head;
1658 	bus_dma_segment_t segs[ixgbe_num_segs];
1660 	struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
1661 	union ixgbe_adv_tx_desc *txd = NULL;
1665 	/* Basic descriptor defines */
1666 	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1667 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1669 	if (m_head->m_flags & M_VLANTAG)
1670         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1672         /* Do a clean if descriptors are low */
1673 	if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
1675 		/* Now do we at least have a minimal? */
1676 		if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD)
1681 	 * Important to capture the first descriptor
1682 	 * used because it will contain the index of
1683 	 * the one we tell the hardware to report back
1685 	first = txr->next_avail_tx_desc;
1686 	txbuf = &txr->tx_buffers[first];
1687 	txbuf_mapped = txbuf;
1691 	 * Map the packet for DMA.
1693 	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1694 	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
	/* Too many segments: defragment once and retry the DMA load */
1696 	if (error == EFBIG) {
1699 		m = m_defrag(*m_headp, M_DONTWAIT);
1701 			adapter->mbuf_defrag_failed++;
1709 		error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1710 		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1712 		if (error == ENOMEM) {
1713 			adapter->no_tx_dma_setup++;
1715 		} else if (error != 0) {
1716 			adapter->no_tx_dma_setup++;
1721 	} else if (error == ENOMEM) {
1722 		adapter->no_tx_dma_setup++;
1724 	} else if (error != 0) {
1725 		adapter->no_tx_dma_setup++;
1731 	/* Make certain there are enough descriptors */
1732 	if (nsegs > txr->tx_avail - 2) {
1733 		txr->no_tx_desc_avail++;
1740 	** Set up the appropriate offload context
1741 	** this becomes the first descriptor of
	/* TSO consumes a context descriptor and payload-length olinfo */
1744 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1745 		if (ixgbe_tso_setup(txr, m_head, &paylen)) {
1746 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1747 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1748 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1749 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1753 	} else if (ixgbe_tx_ctx_setup(txr, m_head))
1754 		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1756 #ifdef IXGBE_IEEE1588
1757 	/* This is changing soon to an mtag detection */
1758 	if (we detect this mbuf has a TSTAMP mtag)
1759 		cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
1762         /* Record payload length */
1764         	olinfo_status |= m_head->m_pkthdr.len <<
1765 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
	/* Fill one data descriptor per DMA segment, wrapping at ring end */
1767 	i = txr->next_avail_tx_desc;
1768 	for (j = 0; j < nsegs; j++) {
1772 		txbuf = &txr->tx_buffers[i];
1773 		txd = &txr->tx_base[i];
1774 		seglen = segs[j].ds_len;
1775 		segaddr = htole64(segs[j].ds_addr);
1777 		txd->read.buffer_addr = segaddr;
1778 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
1779 		    cmd_type_len |seglen);
1780 		txd->read.olinfo_status = htole32(olinfo_status);
1781 		last = i; /* Next descriptor that will get completed */
1783 		if (++i == adapter->num_tx_desc)
1786 		txbuf->m_head = NULL;
1787 		txbuf->eop_index = -1;
	/* Last descriptor: mark end-of-packet and request status writeback */
1790 	txd->read.cmd_type_len |=
1791 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1792 	txr->tx_avail -= nsegs;
1793 	txr->next_avail_tx_desc = i;
1795 	txbuf->m_head = m_head;
1797 	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1799         /* Set the index of the descriptor that will be marked done */
1800         txbuf = &txr->tx_buffers[first];
1801 	txbuf->eop_index = last;
1803         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1804             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1806 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1807 	 * hardware that this frame is available to transmit.
1809 	++txr->total_packets;
1810 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
	/* Failure path (reached via elided gotos): undo the DMA mapping */
1814 	bus_dmamap_unload(txr->txtag, txbuf->map);
/*
 * Apply promiscuous/allmulti interface flags to the hardware filter
 * control register (FCTRL): UPE = unicast promiscuous, MPE = multicast
 * promiscuous.
 */
1820 ixgbe_set_promisc(struct adapter *adapter)
1824 	struct ifnet   *ifp = adapter->ifp;
1826 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1828 	if (ifp->if_flags & IFF_PROMISC) {
1829 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1830 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1831 	} else if (ifp->if_flags & IFF_ALLMULTI) {
1832 		reg_rctl |= IXGBE_FCTRL_MPE;
1833 		reg_rctl &= ~IXGBE_FCTRL_UPE;
1834 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
/*
 * Clear both promiscuous bits (unicast and multicast) in FCTRL,
 * returning the receive filter to normal address matching.
 */
1840 ixgbe_disable_promisc(struct adapter * adapter)
1844 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1846 	reg_rctl &= (~IXGBE_FCTRL_UPE);
1847 	reg_rctl &= (~IXGBE_FCTRL_MPE);
1848 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1854 /*********************************************************************
1857 * This routine is called whenever multicast address list is updated.
1859 **********************************************************************/
1860 #define IXGBE_RAR_ENTRIES 16
/*
 * Rebuild the hardware multicast filter from the interface's multicast
 * address list: program FCTRL promisc/allmulti bits from if_flags,
 * then copy every AF_LINK group address into a flat array and feed it
 * to the shared code via the ixgbe_mc_array_itr iterator.
 */
1863 ixgbe_set_multi(struct adapter *adapter)
1866 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1868 	struct ifmultiaddr *ifma;
1870 	struct ifnet   *ifp = adapter->ifp;
1872 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1874 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/* NOTE(review): this unconditional OR is immediately refined by the
	 * flag tests below; net effect depends on the elided else path */
1875 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1876 	if (ifp->if_flags & IFF_PROMISC)
1877 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1878 	else if (ifp->if_flags & IFF_ALLMULTI) {
1879 		fctrl |= IXGBE_FCTRL_MPE;
1880 		fctrl &= ~IXGBE_FCTRL_UPE;
1882 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1884 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
	/* Snapshot the multicast list under the maddr lock */
1886 	if_maddr_rlock(ifp);
1887 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1888 		if (ifma->ifma_addr->sa_family != AF_LINK)
1890 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1891 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1892 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1895 	if_maddr_runlock(ifp);
1898 	ixgbe_update_mc_addr_list(&adapter->hw,
1899 	    update_ptr, mcnt, ixgbe_mc_array_itr);
1905 * This is an iterator function now needed by the multicast
1906 * shared code. It simply feeds the shared code routine the
1907 * addresses in the array of ixgbe_set_multi() one by one.
/*
 * Iterator callback for ixgbe_update_mc_addr_list(): returns the
 * current address from the flat mta[] array built in ixgbe_set_multi()
 * and advances *update_ptr by one Ethernet address length.
 */
1910 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1912 	u8 *addr = *update_ptr;
1916 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1917 	*update_ptr = newptr;
1922 /*********************************************************************
1925  *  This routine checks for link status, updates statistics,
1926 * and runs the watchdog timer.
1928 **********************************************************************/
/*
 * Once-per-second callout (armed in ixgbe_init_locked): probe pluggable
 * optics if needed, refresh link status and statistics, run the TX
 * watchdog, kick RX queues via software interrupt, and re-arm itself.
 * Runs with the core lock held (asserted).
 */
1931 ixgbe_local_timer(void *arg)
1933 	struct adapter *adapter = arg;
1934 	struct ifnet   *ifp = adapter->ifp;
1936 	mtx_assert(&adapter->core_mtx, MA_OWNED);
1938 	/* Check for pluggable optics */
1939 	if (adapter->sfp_probe)
1940 		if (!ixgbe_sfp_probe(adapter))
1941 			goto out; /* Nothing to do */
1943 	ixgbe_update_link_status(adapter);
1944 	ixgbe_update_stats_counters(adapter);
1945 	if (ixgbe_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1946 		ixgbe_print_hw_stats(adapter);
1949 	 * Each tick we check the watchdog
1950 	 * to protect against hardware hangs.
1952 	ixgbe_watchdog(adapter);
1955 	/* Trigger an RX interrupt on all queues */
1956 	ixgbe_rearm_rx_queues(adapter, adapter->rx_mask);
1958 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1962 ** Note: this routine updates the OS on the link state
1963 ** the real check of the hardware only happens with
1964 ** a link interrupt.
/*
 * Propagate the cached link state (adapter->link_up, set elsewhere by
 * ixgbe_check_link) to the OS: announce up/down transitions to the
 * stack and, on link loss, disarm every ring's TX watchdog.  Per the
 * header comment above, the real hardware check happens in the link
 * interrupt path, not here.
 */
1967 ixgbe_update_link_status(struct adapter *adapter)
1969 	struct ifnet	*ifp = adapter->ifp;
1970 	struct tx_ring *txr = adapter->tx_rings;
1971 	device_t dev = adapter->dev;
1974 	if (adapter->link_up){
1975 		if (adapter->link_active == FALSE) {
			/* link_speed 128 is the 10G speed code; else report 1G */
1977 				device_printf(dev,"Link is up %d Gbps %s \n",
1978 				    ((adapter->link_speed == 128)? 10:1),
1980 			adapter->link_active = TRUE;
1981 			if_link_state_change(ifp, LINK_STATE_UP);
1983 	} else { /* Link down */
1984 		if (adapter->link_active == TRUE) {
1986 				device_printf(dev,"Link is Down\n");
1987 			if_link_state_change(ifp, LINK_STATE_DOWN);
1988 			adapter->link_active = FALSE;
			/* Stop TX watchdogs so they don't fire during link loss */
1989 			for (int i = 0; i < adapter->num_queues;
1991 				txr->watchdog_timer = FALSE;
1999 /*********************************************************************
2001 * This routine disables all traffic on the adapter by issuing a
2002 * global reset on the MAC and deallocates TX/RX buffers.
2004 **********************************************************************/
/*
 * Stop all traffic: disable interrupts, clear RUNNING/OACTIVE so the
 * stack stops transmitting, globally reset and stop the MAC, cancel
 * the watchdog callout, and restore RAR[0] (in case a locally
 * administered address was set).  Caller holds the core lock (asserted).
 */
2007 ixgbe_stop(void *arg)
2010 	struct adapter  *adapter = arg;
2013 	mtx_assert(&adapter->core_mtx, MA_OWNED);
2015 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
2016 	ixgbe_disable_intr(adapter);
2018 	/* Tell the stack that the interface is no longer active */
2019 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2021 	ixgbe_reset_hw(&adapter->hw);
	/* Clear the stopped flag so stop_adapter performs a full stop */
2022 	adapter->hw.adapter_stopped = FALSE;
2023 	ixgbe_stop_adapter(&adapter->hw);
2024 	callout_stop(&adapter->timer);
2026 	/* reprogram the RAR[0] in case user changed it. */
2027 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2033 /*********************************************************************
2035 * Determine hardware revision.
2037 **********************************************************************/
/*
 * Cache PCI identification (vendor/device/revision/subsystem IDs) from
 * config space into the shared-code hw structure for later use by the
 * MAC-type detection logic.
 */
2039 ixgbe_identify_hardware(struct adapter *adapter)
2041 	device_t        dev = adapter->dev;
2043 	/* Save off the information about this board */
2044 	adapter->hw.vendor_id = pci_get_vendor(dev);
2045 	adapter->hw.device_id = pci_get_device(dev);
2046 	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2047 	adapter->hw.subsystem_vendor_id =
2048 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2049 	adapter->hw.subsystem_device_id =
2050 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2055 /*********************************************************************
2057 * Setup the Legacy or MSI Interrupt handler
2059 **********************************************************************/
/*
 * Set up the single-vector (Legacy INTx or MSI) interrupt path:
 * allocate the IRQ resource, create fast taskqueues for deferred
 * TX/RX work and for link/SFP/MSF events, and register
 * ixgbe_legacy_irq as the handler.  On handler-registration failure
 * the taskqueues are freed.
 */
2061 ixgbe_allocate_legacy(struct adapter *adapter)
2063 	device_t dev = adapter->dev;
2064 	struct 		tx_ring *txr = adapter->tx_rings;
2065 	struct		rx_ring *rxr = adapter->rx_rings;
	/* msix == 1 means MSI was allocated; rid selection line is elided */
2069 	if (adapter->msix == 1)
2072 	/* We allocate a single interrupt resource */
2073 	adapter->res = bus_alloc_resource_any(dev,
2074             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2075 	if (adapter->res == NULL) {
2076 		device_printf(dev, "Unable to allocate bus resource: "
2082 	 * Try allocating a fast interrupt and the associated deferred
2083 	 * processing contexts.
2085 	TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
2086 	TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
2087 	txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
2088             taskqueue_thread_enqueue, &txr->tq);
2089 	rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
2090             taskqueue_thread_enqueue, &rxr->tq);
2091 	taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2092             device_get_nameunit(adapter->dev));
2093 	taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
2094             device_get_nameunit(adapter->dev));
2096 	/* Tasklets for Link, SFP and Multispeed Fiber */
2097 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2098 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2099 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2100 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2101 	    taskqueue_thread_enqueue, &adapter->tq);
2102 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2103 	    device_get_nameunit(adapter->dev));
2105 	if ((error = bus_setup_intr(dev, adapter->res,
2106             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2107             adapter, &adapter->tag)) != 0) {
2108 		device_printf(dev, "Failed to register fast interrupt "
2109 		    "handler: %d\n", error);
		/* Clean up the taskqueues created above before failing */
2110 		taskqueue_free(txr->tq);
2111 		taskqueue_free(rxr->tq);
2121 /*********************************************************************
2123 * Setup MSIX Interrupt resources and handlers
2125 **********************************************************************/
/*
 * Set up per-queue MSI-X vectors: for each queue allocate an IRQ
 * resource, register the TX/RX handlers, optionally bind the vector
 * to a CPU, and create a per-ring taskqueue; finally allocate and
 * register the dedicated link/"other" vector and its tasks.
 * `vector` increments across TX, RX, then link, and rx_mask records
 * which vectors the local timer should software-trigger.
 */
2127 ixgbe_allocate_msix(struct adapter *adapter)
2129 	device_t        dev = adapter->dev;
2130 	struct 		tx_ring *txr = adapter->tx_rings;
2131 	struct		rx_ring *rxr = adapter->rx_rings;
2132 	int 		error, rid, vector = 0;
2134 	/* TX setup: the code is here for multi tx,
2135 	   there are other parts of the driver not ready for it */
2136 	for (int i = 0; i < adapter->num_queues; i++, vector++, txr++) {
2138 		txr->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2139 		    RF_SHAREABLE | RF_ACTIVE);
2141 			device_printf(dev,"Unable to allocate"
2142 		    	    " bus resource: tx interrupt [%d]\n", vector);
2145 		/* Set the handler function */
2146 		error = bus_setup_intr(dev, txr->res,
2147 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2148 		    ixgbe_msix_tx, txr, &txr->tag);
2151 			device_printf(dev, "Failed to register TX handler");
2156 		** Bind the msix vector, and thus the
2157 		** ring to the corresponding cpu.
2159 		if (adapter->num_queues > 1)
2160 			bus_bind_intr(dev, txr->res, i);
2162 		TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
2163 		txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
2164 		    taskqueue_thread_enqueue, &txr->tq);
2165 		taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2166 		    device_get_nameunit(adapter->dev));
	/* RX vectors: same pattern as TX, continuing the vector count */
2170 	for (int i = 0; i < adapter->num_queues; i++, vector++, rxr++) {
2172 		rxr->res = bus_alloc_resource_any(dev,
2173 	    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2175 			device_printf(dev,"Unable to allocate"
2176 		    	    " bus resource: rx interrupt [%d],"
2177 		    	    "rid = %d\n", i, rid);
2180 		/* Set the handler function */
2181 		error = bus_setup_intr(dev, rxr->res,
2182 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2183 		    ixgbe_msix_rx, rxr, &rxr->tag);
2186 			device_printf(dev, "Failed to register RX handler");
2190 		/* used in local timer */
2191 		adapter->rx_mask |= (u64)(1 << vector);
2193 		** Bind the msix vector, and thus the
2194 		** ring to the corresponding cpu.
2196 		if (adapter->num_queues > 1)
2197 			bus_bind_intr(dev, rxr->res, i);
2199 		TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
2200 		rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
2201 		    taskqueue_thread_enqueue, &rxr->tq);
2202 		taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
2203 		    device_get_nameunit(adapter->dev));
2206 	/* Now for Link changes */
2208 	adapter->res = bus_alloc_resource_any(dev,
2209     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2210 	if (!adapter->res) {
2211 		device_printf(dev,"Unable to allocate"
2212 	    	    " bus resource: Link interrupt [%d]\n", rid);
2215 	/* Set the link handler function */
2216 	error = bus_setup_intr(dev, adapter->res,
2217 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2218 	    ixgbe_msix_link, adapter, &adapter->tag);
2220 		adapter->res = NULL;
2221 		device_printf(dev, "Failed to register LINK handler");
2224 	adapter->linkvec = vector;
2225 	/* Tasklets for Link, SFP and Multispeed Fiber */
2226 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2227 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2228 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2229 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2230 	    taskqueue_thread_enqueue, &adapter->tq);
2231 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2232 	    device_get_nameunit(adapter->dev));
2238 * Setup Either MSI/X or MSI
/*
 * Decide the interrupt scheme and allocate message vectors.
 * Tries MSI-X first (mapping the MSI-X table BAR — the 82599 uses a
 * higher BAR than the 82598, hence the rid += 4 retry), sizing queues
 * as 2 vectors per queue (RX+TX) plus one for link; falls back to MSI.
 * Returns the number of vectors obtained (used as adapter->msix).
 */
2241 ixgbe_setup_msix(struct adapter *adapter)
2243 	device_t dev = adapter->dev;
2244 	int rid, want, queues, msgs;
2246 	/* Override by tuneable */
2247 	if (ixgbe_enable_msix == 0)
2250 	/* First try MSI/X */
2251 	rid = PCIR_BAR(MSIX_82598_BAR);
2252 	adapter->msix_mem = bus_alloc_resource_any(dev,
2253 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2254        	if (!adapter->msix_mem) {
2255 		rid += 4;	/* 82599 maps in higher BAR */
2256 		adapter->msix_mem = bus_alloc_resource_any(dev,
2257 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2259        	if (!adapter->msix_mem) {
2260 		/* May not be enabled */
2261 		device_printf(adapter->dev,
2262 		    "Unable to map MSIX table \n");
2266 	msgs = pci_msix_count(dev);
2267 	if (msgs == 0) { /* system has msix disabled */
2268 		bus_release_resource(dev, SYS_RES_MEMORY,
2269 		    rid, adapter->msix_mem);
2270 		adapter->msix_mem = NULL;
2274 	/* Figure out a reasonable auto config value */
	/* Cap queue count at both CPU count and available vector pairs */
2275 	queues = (mp_ncpus > ((msgs-1)/2)) ? (msgs-1)/2 : mp_ncpus;
2277 	if (ixgbe_num_queues == 0)
2278 		ixgbe_num_queues = queues;
2280 	** Want two vectors (RX/TX) per queue
2281 	** plus an additional for Link.
2283 	want = (ixgbe_num_queues * 2) + 1;
2287 		device_printf(adapter->dev,
2288 		    "MSIX Configuration Problem, "
2289 		    "%d vectors but %d queues wanted!\n",
2291 		return (0); /* Will go to Legacy setup */
2293 	if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
2294                	device_printf(adapter->dev,
2295 		    "Using MSIX interrupts with %d vectors\n", msgs);
2296 		adapter->num_queues = ixgbe_num_queues;
	/* MSI-X unavailable: fall back to a single MSI vector if possible */
2300        	msgs = pci_msi_count(dev);
2301        	if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
2302                	device_printf(adapter->dev,"Using MSI interrupt\n");
/*
 * Map the device's register BAR (BAR 0) and stash the bus-space
 * tag/handle for the shared code's register access macros, then
 * determine the interrupt scheme via ixgbe_setup_msix().
 */
2308 ixgbe_allocate_pci_resources(struct adapter *adapter)
2311 	device_t        dev = adapter->dev;
2314 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2317 	if (!(adapter->pci_mem)) {
2318 		device_printf(dev,"Unable to allocate bus resource: memory\n");
2322 	adapter->osdep.mem_bus_space_tag =
2323 		rman_get_bustag(adapter->pci_mem);
2324 	adapter->osdep.mem_bus_space_handle =
2325 		rman_get_bushandle(adapter->pci_mem);
	/* Shared code dereferences hw_addr via the osdep handle */
2326 	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2328 	/* Legacy defaults */
2329 	adapter->num_queues = 1;
2330 	adapter->hw.back = &adapter->osdep;
2333 	** Now setup MSI or MSI/X, should
2334 	** return us the number of supported
2335 	** vectors. (Will be 1 for MSI)
2337 	adapter->msix = ixgbe_setup_msix(adapter);
/*
 * Release everything ixgbe_allocate_pci_resources / allocate_msix /
 * allocate_legacy acquired: per-queue IRQ teardown, the link or legacy
 * interrupt, MSI(-X) messages, the MSI-X table BAR, and the register
 * BAR.  Safe to call from a partially-failed attach (guarded by the
 * adapter->res NULL check).
 */
2342 ixgbe_free_pci_resources(struct adapter * adapter)
2344 	struct 		tx_ring *txr = adapter->tx_rings;
2345 	struct		rx_ring *rxr = adapter->rx_rings;
2346 	device_t	dev = adapter->dev;
	/* The MSI-X table lives in a different BAR per MAC generation */
2349 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2350 		memrid = PCIR_BAR(MSIX_82598_BAR);
2352 		memrid = PCIR_BAR(MSIX_82599_BAR);
2355 	** There is a slight possibility of a failure mode
2356 	** in attach that will result in entering this function
2357 	** before interrupt resources have been initialized, and
2358 	** in that case we do not want to execute the loops below
2359 	** We can detect this reliably by the state of the adapter
2362 	if (adapter->res == NULL)
2366 	**  Release all the interrupt resources:
2367 	**  notice this is harmless for Legacy or
2368 	**  MSI since pointers will always be NULL
2370 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2371 		rid = txr->msix + 1;
2372 		if (txr->tag != NULL) {
2373 			bus_teardown_intr(dev, txr->res, txr->tag);
2376 		if (txr->res != NULL)
2377 			bus_release_resource(dev, SYS_RES_IRQ, rid, txr->res);
2380 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2381 		rid = rxr->msix + 1;
2382 		if (rxr->tag != NULL) {
2383 			bus_teardown_intr(dev, rxr->res, rxr->tag);
2386 		if (rxr->res != NULL)
2387 			bus_release_resource(dev, SYS_RES_IRQ, rid, rxr->res);
2390 	/* Clean the Legacy or Link interrupt last */
2391 	if (adapter->linkvec) /* we are doing MSIX */
2392 		rid = adapter->linkvec + 1;
2394 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
2396 	if (adapter->tag != NULL) {
2397 		bus_teardown_intr(dev, adapter->res, adapter->tag);
		/* NULL the tag so a repeated call cannot double-teardown */
2398 		adapter->tag = NULL;
2400 	if (adapter->res != NULL)
2401 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2404 	pci_release_msi(dev);
2406 	if (adapter->msix_mem != NULL)
2407 		bus_release_resource(dev, SYS_RES_MEMORY,
2408 		    memrid, adapter->msix_mem);
2410 	if (adapter->pci_mem != NULL)
2411 		bus_release_resource(dev, SYS_RES_MEMORY,
2412 		    PCIR_BAR(0), adapter->pci_mem);
2417 /*********************************************************************
2419 * Initialize the hardware to a configuration as specified by the
2420 * adapter structure. The controller is reset, the EEPROM is
2421 * verified, the MAC address is set, then the shared initialization
2422 * routines are called.
2424 **********************************************************************/
/*
 * ixgbe_hardware_init:
 *   Bring the controller to a known state: global reset, EEPROM
 *   checksum validation, default flow-control parameters, then the
 *   shared-code ixgbe_init_hw() with per-error-code diagnostics.
 */
2426 ixgbe_hardware_init(struct adapter *adapter)
2428 device_t dev = adapter->dev;
2433 /* Issue a global reset */
2434 adapter->hw.adapter_stopped = FALSE;
2435 ixgbe_stop_adapter(&adapter->hw);
2437 /* Make sure we have a good EEPROM before we read from it */
2438 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
2439 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
2443 /* Get Hardware Flow Control setting */
2444 adapter->hw.fc.requested_mode = ixgbe_fc_full;
2445 adapter->hw.fc.pause_time = IXGBE_FC_PAUSE;
2446 adapter->hw.fc.low_water = IXGBE_FC_LO;
2447 adapter->hw.fc.high_water = IXGBE_FC_HI;
2448 adapter->hw.fc.send_xon = TRUE;
/* Shared-code init: PHY/MAC setup; distinguish warning vs fatal codes */
2450 ret = ixgbe_init_hw(&adapter->hw);
2451 if (ret == IXGBE_ERR_EEPROM_VERSION) {
2452 device_printf(dev, "This device is a pre-production adapter/"
2453 "LOM. Please be aware there may be issues associated "
2454 "with your hardware.\n If you are experiencing problems "
2455 "please contact your Intel or hardware representative "
2456 "who provided you with this hardware.\n");
2457 } else if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2458 device_printf(dev,"Unsupported SFP+ Module\n");
2460 } else if (ret != 0 ) {
2461 device_printf(dev,"Hardware Initialization Failure\n");
2468 /*********************************************************************
2470 * Setup networking device structure and register an interface.
2472 **********************************************************************/
/*
 * ixgbe_setup_interface:
 *   Allocate and populate the ifnet: entry points (init/ioctl/start,
 *   plus multiqueue transmit on FreeBSD 8+), capabilities (checksum,
 *   TSO4, VLAN, jumbo, LRO), attach the Ethernet layer, set link
 *   speed, and register the supported ifmedia types.
 */
2474 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2477 struct ixgbe_hw *hw = &adapter->hw;
2478 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2480 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2482 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
2483 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2484 ifp->if_mtu = ETHERMTU;
2485 ifp->if_baudrate = 1000000000;
2486 ifp->if_init = ixgbe_init;
2487 ifp->if_softc = adapter;
2488 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2489 ifp->if_ioctl = ixgbe_ioctl;
2490 ifp->if_start = ixgbe_start;
2491 #if __FreeBSD_version >= 800000
/* FreeBSD 8+: direct-dispatch multiqueue transmit path */
2492 ifp->if_transmit = ixgbe_mq_start;
2493 ifp->if_qflush = ixgbe_qflush;
/* Per-ring watchdog is used instead of the ifnet one */
2496 ifp->if_watchdog = NULL;
2497 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
2499 ether_ifattach(ifp, adapter->hw.mac.addr);
2501 adapter->max_frame_size =
2502 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
/*
2505 * Tell the upper layer(s) we support long frames.
*/
2507 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2509 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
2510 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2511 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;
2513 ifp->if_capenable = ifp->if_capabilities;
/* 82598AT has a copper PHY: advertise 1G as well as 10G */
2515 if (hw->device_id == IXGBE_DEV_ID_82598AT)
2516 ixgbe_setup_link_speed(hw, (IXGBE_LINK_SPEED_10GB_FULL |
2517 IXGBE_LINK_SPEED_1GB_FULL), TRUE, TRUE);
2519 ixgbe_setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL,
/*
2523 * Specify the media types supported by this adapter and register
2524 * callbacks to update media and link information
*/
2526 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2527 ixgbe_media_status);
2528 ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics |
2530 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2531 ifmedia_add(&adapter->media,
2532 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2533 ifmedia_add(&adapter->media,
2534 IFM_ETHER | IFM_1000_T, 0, NULL);
2536 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2537 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2542 /********************************************************************
2543 * Manage DMA'able memory.
2544 *******************************************************************/
/*
 * ixgbe_dmamap_cb:
 *   bus_dmamap_load(9) callback; stores the bus address of the first
 *   (and only expected) segment into the caller-supplied bus_addr_t.
 */
2546 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
2550 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * ixgbe_dma_malloc:
 *   Allocate a DMA-able region of 'size' bytes for a descriptor ring:
 *   create a tag, allocate and map the memory, then load it to get
 *   its bus address. On failure, unwinds whatever succeeded and
 *   clears dma_map/dma_tag so ixgbe_dma_free() is safe to skip.
 */
2555 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2556 struct ixgbe_dma_alloc *dma, int mapflags)
2558 device_t dev = adapter->dev;
2561 r = bus_dma_tag_create(NULL, /* parent */
2562 1, 0, /* alignment, bounds */
2563 BUS_SPACE_MAXADDR, /* lowaddr */
2564 BUS_SPACE_MAXADDR, /* highaddr */
2565 NULL, NULL, /* filter, filterarg */
2568 size, /* maxsegsize */
2569 BUS_DMA_ALLOCNOW, /* flags */
2570 NULL, /* lockfunc */
2571 NULL, /* lockfuncarg */
2574 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2578 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2579 BUS_DMA_NOWAIT, &dma->dma_map);
2581 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
/* Load the map; the callback records the ring's bus address */
2585 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2589 mapflags | BUS_DMA_NOWAIT);
2591 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2595 dma->dma_size = size;
/* Error unwind: free memory then destroy the tag */
2598 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2600 bus_dma_tag_destroy(dma->dma_tag);
2602 dma->dma_map = NULL;
2603 dma->dma_tag = NULL;
/*
 * ixgbe_dma_free:
 *   Release a region obtained from ixgbe_dma_malloc(): sync for CPU,
 *   unload the map, free the memory, and destroy the tag.
 */
2608 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2610 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2611 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2612 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2613 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2614 bus_dma_tag_destroy(dma->dma_tag);
2618 /*********************************************************************
2620 * Allocate memory for the transmit and receive rings, and then
2621 * the descriptors associated with each, called only once at attach.
2623 **********************************************************************/
/*
 * ixgbe_allocate_queues:
 *   One-time (attach) allocation of the TX and RX ring arrays, and
 *   per-ring descriptor DMA memory, locks, and buffer structures.
 *   txconf/rxconf count how many rings completed so the failure path
 *   can free exactly what was set up.
 */
2625 ixgbe_allocate_queues(struct adapter *adapter)
2627 device_t dev = adapter->dev;
2628 struct tx_ring *txr;
2629 struct rx_ring *rxr;
2630 int rsize, tsize, error = IXGBE_SUCCESS;
2631 int txconf = 0, rxconf = 0;
2633 /* First allocate the TX ring struct memory */
2634 if (!(adapter->tx_rings =
2635 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2636 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2637 device_printf(dev, "Unable to allocate TX ring memory\n");
2641 txr = adapter->tx_rings;
2643 /* Next allocate the RX */
2644 if (!(adapter->rx_rings =
2645 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2646 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2647 device_printf(dev, "Unable to allocate RX ring memory\n");
2651 rxr = adapter->rx_rings;
2653 /* For the ring itself */
2654 tsize = roundup2(adapter->num_tx_desc *
2655 sizeof(union ixgbe_adv_tx_desc), 4096);
/*
2658 * Now set up the TX queues, txconf is needed to handle the
2659 * possibility that things fail midcourse and we need to
2660 * undo memory gracefully
*/
2662 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2663 /* Set up some basics */
2664 txr = &adapter->tx_rings[i];
2665 txr->adapter = adapter;
2668 /* Initialize the TX side lock */
2669 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2670 device_get_nameunit(dev), txr->me);
2671 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2673 if (ixgbe_dma_malloc(adapter, tsize,
2674 &txr->txdma, BUS_DMA_NOWAIT)) {
2676 "Unable to allocate TX Descriptor memory\n");
2680 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2681 bzero((void *)txr->tx_base, tsize);
2683 /* Now allocate transmit buffers for the ring */
2684 if (ixgbe_allocate_transmit_buffers(txr)) {
2686 "Critical Failure setting up transmit buffers\n");
2690 #if __FreeBSD_version >= 800000
2691 /* Allocate a buf ring */
2692 txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2693 M_WAITOK, &txr->tx_mtx);
/*
2698 * Next the RX queues...
*/
2700 rsize = roundup2(adapter->num_rx_desc *
2701 sizeof(union ixgbe_adv_rx_desc), 4096);
2702 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2703 rxr = &adapter->rx_rings[i];
2704 /* Set up some basics */
2705 rxr->adapter = adapter;
2708 /* Initialize the RX side lock */
2709 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2710 device_get_nameunit(dev), rxr->me);
2711 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2713 if (ixgbe_dma_malloc(adapter, rsize,
2714 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2716 "Unable to allocate RxDescriptor memory\n");
2720 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2721 bzero((void *)rxr->rx_base, rsize);
2723 /* Allocate receive buffers for the ring*/
2724 if (ixgbe_allocate_receive_buffers(rxr)) {
2726 "Critical Failure setting up receive buffers\n");
/* Error unwind: free only the rings that completed (rxconf/txconf) */
2735 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2736 ixgbe_dma_free(adapter, &rxr->rxdma);
2738 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2739 ixgbe_dma_free(adapter, &txr->txdma);
2740 free(adapter->rx_rings, M_DEVBUF);
2742 free(adapter->tx_rings, M_DEVBUF);
2747 /*********************************************************************
2749 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2750 * the information needed to transmit a packet on the wire. This is
2751 * called only once at attach, setup is done every reset.
2753 **********************************************************************/
/*
 * ixgbe_allocate_transmit_buffers:
 *   Per-ring one-time allocation: create the TX DMA tag (TSO-sized,
 *   up to ixgbe_num_segs segments), the tx_buffer array, and one
 *   DMA map per descriptor. On failure everything is torn down via
 *   ixgbe_free_transmit_structures().
 */
2755 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2757 struct adapter *adapter = txr->adapter;
2758 device_t dev = adapter->dev;
2759 struct ixgbe_tx_buf *txbuf;
/*
2763 * Setup DMA descriptor areas.
*/
2765 if ((error = bus_dma_tag_create(NULL, /* parent */
2766 1, 0, /* alignment, bounds */
2767 BUS_SPACE_MAXADDR, /* lowaddr */
2768 BUS_SPACE_MAXADDR, /* highaddr */
2769 NULL, NULL, /* filter, filterarg */
2770 IXGBE_TSO_SIZE, /* maxsize */
2771 ixgbe_num_segs, /* nsegments */
2772 PAGE_SIZE, /* maxsegsize */
2774 NULL, /* lockfunc */
2775 NULL, /* lockfuncarg */
2777 device_printf(dev,"Unable to allocate TX DMA tag\n");
2781 if (!(txr->tx_buffers =
2782 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
2783 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2784 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2789 /* Create the descriptor buffer dma maps */
2790 txbuf = txr->tx_buffers;
2791 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2792 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2794 device_printf(dev, "Unable to create TX DMA map\n");
2801 /* We free all, it handles case where we are in the middle */
2802 ixgbe_free_transmit_structures(adapter);
2806 /*********************************************************************
2808 * Initialize a transmit ring.
2810 **********************************************************************/
/*
 * ixgbe_setup_transmit_ring:
 *   (Re)initialize one TX ring: zero the descriptor area, reset the
 *   producer/consumer indices, free any mbufs still attached from a
 *   previous run, and sync the ring for device access. Called on
 *   every reset, not just at attach.
 */
2812 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2814 struct adapter *adapter = txr->adapter;
2815 struct ixgbe_tx_buf *txbuf;
2818 /* Clear the old ring contents */
2819 bzero((void *)txr->tx_base,
2820 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc;
2822 txr->next_avail_tx_desc = 0;
2823 txr->next_tx_to_clean = 0;
2825 /* Free any existing tx buffers. */
2826 txbuf = txr->tx_buffers;
2827 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2828 if (txbuf->m_head != NULL) {
2829 bus_dmamap_sync(txr->txtag, txbuf->map,
2830 BUS_DMASYNC_POSTWRITE);
2831 bus_dmamap_unload(txr->txtag, txbuf->map);
2832 m_freem(txbuf->m_head);
2833 txbuf->m_head = NULL;
2835 /* Clear the EOP index */
2836 txbuf->eop_index = -1;
2839 /* Set number of descriptors available */
2840 txr->tx_avail = adapter->num_tx_desc;
2842 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2843 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2846 /*********************************************************************
2848 * Initialize all transmit rings.
2850 **********************************************************************/
/*
 * ixgbe_setup_transmit_structures:
 *   Initialize every TX ring by calling ixgbe_setup_transmit_ring()
 *   on each of the adapter's num_queues rings.
 */
2852 ixgbe_setup_transmit_structures(struct adapter *adapter)
2854 struct tx_ring *txr = adapter->tx_rings;
2856 for (int i = 0; i < adapter->num_queues; i++, txr++)
2857 ixgbe_setup_transmit_ring(txr);
2862 /*********************************************************************
2864 * Enable transmit unit.
2866 **********************************************************************/
/*
 * ixgbe_initialize_transmit_units:
 *   Program the hardware TX registers for each ring: ring base
 *   (TDBAL/TDBAH), length (TDLEN), and head/tail pointers (TDH/TDT),
 *   plus per-ring software state. On 82599 the DMA TX engine must
 *   also be explicitly enabled via DMATXCTL.TE.
 */
2868 ixgbe_initialize_transmit_units(struct adapter *adapter)
2870 struct tx_ring *txr = adapter->tx_rings;
2871 struct ixgbe_hw *hw = &adapter->hw;
2873 /* Setup the Base and Length of the Tx Descriptor Ring */
2875 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2876 u64 tdba = txr->txdma.dma_paddr;
/* 64-bit ring base split across two 32-bit registers */
2878 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2879 (tdba & 0x00000000ffffffffULL));
2880 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2881 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2882 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2884 /* Setup the HW Tx Head and Tail descriptor pointers */
2885 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2886 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2888 /* Setup Transmit Descriptor Cmd Settings */
2889 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2891 txr->watchdog_timer = 0;
/* 82599 requires the transmit DMA engine to be enabled explicitly */
2894 if (hw->mac.type == ixgbe_mac_82599EB) {
2896 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2897 dmatxctl |= IXGBE_DMATXCTL_TE;
2898 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2904 /*********************************************************************
2906 * Free all transmit rings.
2908 **********************************************************************/
/*
 * ixgbe_free_transmit_structures:
 *   Free every TX ring: per-ring buffers, descriptor DMA memory and
 *   the ring mutex, then the ring array itself.
 */
2910 ixgbe_free_transmit_structures(struct adapter *adapter)
2912 struct tx_ring *txr = adapter->tx_rings;
2914 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2916 ixgbe_free_transmit_buffers(txr);
2917 ixgbe_dma_free(adapter, &txr->txdma);
2918 IXGBE_TX_UNLOCK(txr);
2919 IXGBE_TX_LOCK_DESTROY(txr);
2921 free(adapter->tx_rings, M_DEVBUF);
2924 /*********************************************************************
2926 * Free transmit ring related data structures.
2928 **********************************************************************/
/*
 * ixgbe_free_transmit_buffers:
 *   Release one ring's TX buffer resources: free any mbufs still
 *   mapped (unload + destroy their maps), maps without mbufs, the
 *   buf_ring (FreeBSD 8+), the tx_buffer array, and the DMA tag.
 *   Safe to call when allocation only partially completed.
 */
2930 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2932 struct adapter *adapter = txr->adapter;
2933 struct ixgbe_tx_buf *tx_buffer;
2936 INIT_DEBUGOUT("free_transmit_ring: begin");
2938 if (txr->tx_buffers == NULL)
2941 tx_buffer = txr->tx_buffers;
2942 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2943 if (tx_buffer->m_head != NULL) {
2944 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2945 BUS_DMASYNC_POSTWRITE);
2946 bus_dmamap_unload(txr->txtag,
2948 m_freem(tx_buffer->m_head);
2949 tx_buffer->m_head = NULL;
2950 if (tx_buffer->map != NULL) {
2951 bus_dmamap_destroy(txr->txtag,
2953 tx_buffer->map = NULL;
/* Map exists but no mbuf: still unload and destroy it */
2955 } else if (tx_buffer->map != NULL) {
2956 bus_dmamap_unload(txr->txtag,
2958 bus_dmamap_destroy(txr->txtag,
2960 tx_buffer->map = NULL;
2963 #if __FreeBSD_version >= 800000
2964 if (txr->br != NULL)
2965 buf_ring_free(txr->br, M_DEVBUF);
2967 if (txr->tx_buffers != NULL) {
2968 free(txr->tx_buffers, M_DEVBUF);
2969 txr->tx_buffers = NULL;
2971 if (txr->txtag != NULL) {
2972 bus_dma_tag_destroy(txr->txtag);
2978 /*********************************************************************
2980 * Advanced Context Descriptor setup for VLAN or CSUM
2982 **********************************************************************/
/*
 * ixgbe_tx_ctx_setup:
 *   Build an advanced context descriptor for VLAN tagging and/or
 *   IP/TCP/UDP checksum offload of the given mbuf. Parses the
 *   Ethernet (and optional VLAN) header to find the payload, then
 *   fills vlan_macip_lens / type_tucmd_mlhl and consumes one
 *   descriptor slot.
 */
2985 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2987 struct adapter *adapter = txr->adapter;
2988 struct ixgbe_adv_tx_context_desc *TXD;
2989 struct ixgbe_tx_buf *tx_buffer;
2990 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2991 struct ether_vlan_header *eh;
2993 struct ip6_hdr *ip6;
2994 int ehdrlen, ip_hlen = 0;
2997 bool offload = TRUE;
2998 int ctxd = txr->next_avail_tx_desc;
/* No checksum offload requested for this packet */
3002 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
3005 tx_buffer = &txr->tx_buffers[ctxd];
3006 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
/*
3009 ** In advanced descriptors the vlan tag must
3010 ** be placed into the descriptor itself.
*/
3012 if (mp->m_flags & M_VLANTAG) {
3013 vtag = htole16(mp->m_pkthdr.ether_vtag);
3014 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3015 } else if (offload == FALSE)
/*
3019 * Determine where frame payload starts.
3020 * Jump over vlan headers if already present,
3021 * helpful for QinQ too.
*/
3023 eh = mtod(mp, struct ether_vlan_header *);
3024 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3025 etype = ntohs(eh->evl_proto);
3026 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3028 etype = ntohs(eh->evl_encap_proto);
3029 ehdrlen = ETHER_HDR_LEN;
3032 /* Set the ether header length */
3033 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
/* IPv4 branch of the ethertype dispatch */
3037 ip = (struct ip *)(mp->m_data + ehdrlen);
3038 ip_hlen = ip->ip_hl << 2;
3039 if (mp->m_len < ehdrlen + ip_hlen)
3042 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3044 case ETHERTYPE_IPV6:
3045 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3046 ip_hlen = sizeof(struct ip6_hdr);
3047 if (mp->m_len < ehdrlen + ip_hlen)
3049 ipproto = ip6->ip6_nxt;
3050 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
3057 vlan_macip_lens |= ip_hlen;
3058 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
/* L4 protocol selection for the checksum engine */
3062 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
3063 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3067 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
3068 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
3076 /* Now copy bits into descriptor */
3077 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3078 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3079 TXD->seqnum_seed = htole32(0);
3080 TXD->mss_l4len_idx = htole32(0);
3082 tx_buffer->m_head = NULL;
3083 tx_buffer->eop_index = -1;
3085 /* We've consumed the first desc, adjust counters */
3086 if (++ctxd == adapter->num_tx_desc)
3088 txr->next_avail_tx_desc = ctxd;
3094 /**********************************************************************
3096 * Setup work for hardware segmentation offload (TSO) on
3097 * adapters using advanced tx descriptors
3099 **********************************************************************/
/*
 * ixgbe_tso_setup:
 *   Build the context descriptor for TCP segmentation offload (IPv4
 *   only here). Computes header lengths, seeds the TCP pseudo-header
 *   checksum, returns the payload length via *paylen, and consumes
 *   one descriptor slot. Returns FALSE for non-TCP packets.
 */
3101 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
3103 struct adapter *adapter = txr->adapter;
3104 struct ixgbe_adv_tx_context_desc *TXD;
3105 struct ixgbe_tx_buf *tx_buffer;
3106 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3107 u32 mss_l4len_idx = 0;
3109 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
3110 struct ether_vlan_header *eh;
/*
3116 * Determine where frame payload starts.
3117 * Jump over vlan headers if already present
*/
3119 eh = mtod(mp, struct ether_vlan_header *);
3120 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
3121 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3123 ehdrlen = ETHER_HDR_LEN;
3125 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3126 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3129 ctxd = txr->next_avail_tx_desc;
3130 tx_buffer = &txr->tx_buffers[ctxd];
3131 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3133 ip = (struct ip *)(mp->m_data + ehdrlen);
3134 if (ip->ip_p != IPPROTO_TCP)
3135 return FALSE; /* 0 */
3137 ip_hlen = ip->ip_hl << 2;
3138 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
/* Hardware expects the pseudo-header checksum pre-seeded */
3139 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3140 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3141 tcp_hlen = th->th_off << 2;
3142 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
3144 /* This is used in the transmit desc in encap */
3145 *paylen = mp->m_pkthdr.len - hdrlen;
3147 /* VLAN MACLEN IPLEN */
3148 if (mp->m_flags & M_VLANTAG) {
3149 vtag = htole16(mp->m_pkthdr.ether_vtag);
3150 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3153 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3154 vlan_macip_lens |= ip_hlen;
3155 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3157 /* ADV DTYPE TUCMD */
3158 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3159 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3160 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3161 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
/* MSS and L4 header length for the segmentation engine */
3165 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
3166 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
3167 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3169 TXD->seqnum_seed = htole32(0);
3170 tx_buffer->m_head = NULL;
3171 tx_buffer->eop_index = -1;
3173 if (++ctxd == adapter->num_tx_desc)
3177 txr->next_avail_tx_desc = ctxd;
3182 /**********************************************************************
3184 * Examine each tx_buffer in the used queue. If the hardware is done
3185 * processing the packet then free associated resources. The
3186 * tx_buffer is put back on the free queue.
3188 **********************************************************************/
/*
 * ixgbe_txeof:
 *   TX completion ("end of frame") processing. Walks descriptors
 *   from next_tx_to_clean, and for each packet whose EOP descriptor
 *   has the DD (descriptor done) bit set, frees its mbuf and DMA
 *   mapping and reclaims the descriptors. Updates tx_avail and the
 *   per-ring watchdog. Caller must hold the ring's TX mutex.
 */
3190 ixgbe_txeof(struct tx_ring *txr)
3192 struct adapter * adapter = txr->adapter;
3193 struct ifnet *ifp = adapter->ifp;
3194 u32 first, last, done, num_avail;
3196 struct ixgbe_tx_buf *tx_buffer;
3197 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
3199 mtx_assert(&txr->tx_mtx, MA_OWNED);
/* Ring completely clean: nothing to do */
3201 if (txr->tx_avail == adapter->num_tx_desc)
3204 num_avail = txr->tx_avail;
3205 first = txr->next_tx_to_clean;
3207 tx_buffer = &txr->tx_buffers[first];
3208 /* For cleanup we just use legacy struct */
3209 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3210 last = tx_buffer->eop_index;
3214 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
/*
3216 ** Get the index of the first descriptor
3217 ** BEYOND the EOP and call that 'done'.
3218 ** I do this so the comparison in the
3219 ** inner while loop below can be simple
*/
3221 if (++last == adapter->num_tx_desc) last = 0;
3224 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3225 BUS_DMASYNC_POSTREAD);
/*
3227 ** Only the EOP descriptor of a packet now has the DD
3228 ** bit set, this is what we look for...
*/
3230 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
3231 /* We clean the range of the packet */
3232 while (first != done) {
3233 tx_desc->upper.data = 0;
3234 tx_desc->lower.data = 0;
3235 tx_desc->buffer_addr = 0;
3236 num_avail++; cleaned++;
3238 if (tx_buffer->m_head) {
3240 bus_dmamap_sync(txr->txtag,
3242 BUS_DMASYNC_POSTWRITE);
3243 bus_dmamap_unload(txr->txtag,
3245 m_freem(tx_buffer->m_head);
3246 tx_buffer->m_head = NULL;
3247 tx_buffer->map = NULL;
3249 tx_buffer->eop_index = -1;
3251 if (++first == adapter->num_tx_desc)
3254 tx_buffer = &txr->tx_buffers[first];
3256 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3258 /* See if there is more work now */
3259 last = tx_buffer->eop_index;
3262 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3263 /* Get next done point */
3264 if (++last == adapter->num_tx_desc) last = 0;
3269 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3270 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3272 txr->next_tx_to_clean = first;
/*
3275 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
3276 * it is OK to send packets. If there are no pending descriptors,
3277 * clear the timeout. Otherwise, if some descriptors have been freed,
3278 * restart the timeout.
*/
3280 if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
3281 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3282 /* If all are clean turn off the timer */
3283 if (num_avail == adapter->num_tx_desc) {
3284 txr->watchdog_timer = 0;
3285 txr->tx_avail = num_avail;
3290 /* Some were cleaned, so reset timer */
3292 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
3293 txr->tx_avail = num_avail;
3297 /*********************************************************************
3299 * Get a buffer from system mbuf buffer pool.
3301 **********************************************************************/
/*
 * ixgbe_get_buf:
 *   Refresh RX slot 'i' with a header mbuf and/or jumbo-cluster
 *   payload mbuf (packet-split layout: the 'clean' flags select
 *   which of the two to replace vs. reuse). The pair is chained,
 *   DMA-mapped via the ring's spare map (then swapped in), and the
 *   two segment addresses written into the descriptor. On mbuf
 *   shortage the old buffers are re-mapped so the descriptor is
 *   repaired after hardware writeback clobbered it.
 */
3303 ixgbe_get_buf(struct rx_ring *rxr, int i, u8 clean)
3305 struct adapter *adapter = rxr->adapter;
3306 bus_dma_segment_t seg[2];
3307 struct ixgbe_rx_buf *rxbuf;
3308 struct mbuf *mh, *mp;
3314 rxbuf = &rxr->rx_buffers[i];
3316 /* First get our header and payload mbuf */
3317 if (clean & IXGBE_CLEAN_HDR) {
3318 mh = m_gethdr(M_DONTWAIT, MT_DATA);
3322 mh = rxr->rx_buffers[i].m_head;
3325 mh->m_flags |= M_PKTHDR;
3327 if (clean & IXGBE_CLEAN_PKT) {
3328 mp = m_getjcl(M_DONTWAIT, MT_DATA,
3329 M_PKTHDR, adapter->rx_mbuf_sz);
3332 mp->m_len = adapter->rx_mbuf_sz;
3333 mp->m_flags &= ~M_PKTHDR;
3334 } else { /* reusing */
3335 mp = rxr->rx_buffers[i].m_pack;
3336 mp->m_len = adapter->rx_mbuf_sz;
3337 mp->m_flags &= ~M_PKTHDR;
/*
3340 ** Need to create a chain for the following
3341 ** dmamap call at this point.
*/
3344 mh->m_pkthdr.len = mh->m_len + mp->m_len;
3346 /* Get the memory mapping */
3347 error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3348 rxr->spare_map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3350 printf("GET BUF: dmamap load failure - %d\n", error);
3355 /* Unload old mapping and update buffer struct */
3356 if (rxbuf->m_head != NULL)
3357 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
/* Swap: slot takes the freshly-loaded spare map */
3359 rxbuf->map = rxr->spare_map;
3360 rxr->spare_map = map;
3363 bus_dmamap_sync(rxr->rxtag,
3364 rxbuf->map, BUS_DMASYNC_PREREAD);
3366 /* Update descriptor */
3367 rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3368 rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
/*
3373 ** If we get here, we have an mbuf resource
3374 ** issue, so we discard the incoming packet
3375 ** and attempt to reuse existing mbufs next
3376 ** pass thru the ring, but to do so we must
3377 ** fix up the descriptor which had the address
3378 ** clobbered with writeback info.
*/
3381 adapter->mbuf_header_failed++;
3383 /* Is there a reusable buffer? */
3384 mh = rxr->rx_buffers[i].m_head;
3385 if (mh == NULL) /* Nope, init error */
3387 mp = rxr->rx_buffers[i].m_pack;
3388 if (mp == NULL) /* Nope, init error */
3390 /* Get our old mapping */
3391 rxbuf = &rxr->rx_buffers[i];
3392 error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3393 rxbuf->map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3395 /* We really have a problem */
3399 /* Now fix the descriptor as needed */
3400 rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3401 rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
3407 /*********************************************************************
3409 * Allocate memory for rx_buffer structures. Since we use one
3410 * rx_buffer per received packet, the maximum number of rx_buffer's
3411 * that we'll need is equal to the number of receive descriptors
3412 * that we've allocated.
3414 **********************************************************************/
/*
 * ixgbe_allocate_receive_buffers:
 *   One-time (attach) allocation for one RX ring: the rx_buffer
 *   array, a two-segment DMA tag sized for packet split, the spare
 *   map used by ixgbe_get_buf(), and one DMA map per descriptor.
 *   Partial failures are cleaned up by ixgbe_free_receive_structures().
 */
3416 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
3418 struct adapter *adapter = rxr->adapter;
3419 device_t dev = adapter->dev;
3420 struct ixgbe_rx_buf *rxbuf;
3421 int i, bsize, error;
3423 bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
3424 if (!(rxr->rx_buffers =
3425 (struct ixgbe_rx_buf *) malloc(bsize,
3426 M_DEVBUF, M_NOWAIT | M_ZERO))) {
3427 device_printf(dev, "Unable to allocate rx_buffer memory\n");
/*
3433 ** The tag is made to accommodate the largest buffer size
3434 ** with packet split (hence the two segments, even though
3435 ** it may not always use this.
*/
3437 if ((error = bus_dma_tag_create(NULL, /* parent */
3438 1, 0, /* alignment, bounds */
3439 BUS_SPACE_MAXADDR, /* lowaddr */
3440 BUS_SPACE_MAXADDR, /* highaddr */
3441 NULL, NULL, /* filter, filterarg */
3442 MJUM16BYTES, /* maxsize */
3444 MJUMPAGESIZE, /* maxsegsize */
3446 NULL, /* lockfunc */
3447 NULL, /* lockfuncarg */
3449 device_printf(dev, "Unable to create RX DMA tag\n");
3453 /* Create the spare map (used by getbuf) */
3454 error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
3457 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3462 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
3463 rxbuf = &rxr->rx_buffers[i];
3464 error = bus_dmamap_create(rxr->rxtag,
3465 BUS_DMA_NOWAIT, &rxbuf->map);
3467 device_printf(dev, "Unable to create RX DMA map\n");
3475 /* Frees all, but can handle partial completion */
3476 ixgbe_free_receive_structures(adapter);
3480 /*********************************************************************
3482 * Initialize a receive ring and its buffers.
3484 **********************************************************************/
/*
 * ixgbe_setup_receive_ring:
 *   (Re)initialize one RX ring: zero the descriptor area, free any
 *   leftover header/payload mbufs, refill every slot through
 *   ixgbe_get_buf(), reset ring indices, and set up LRO (which also
 *   enables header split) if the interface has IFCAP_LRO enabled.
 *   On refill failure, frees the buffers populated so far ('j' is
 *   the failing index).
 */
3486 ixgbe_setup_receive_ring(struct rx_ring *rxr)
3488 struct adapter *adapter;
3491 struct ixgbe_rx_buf *rxbuf;
3492 struct lro_ctrl *lro = &rxr->lro;
3495 adapter = rxr->adapter;
3499 /* Clear the ring contents */
3500 rsize = roundup2(adapter->num_rx_desc *
3501 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
3502 bzero((void *)rxr->rx_base, rsize);
/*
3505 ** Free current RX buffer structs and their mbufs
*/
3507 for (int i = 0; i < adapter->num_rx_desc; i++) {
3508 rxbuf = &rxr->rx_buffers[i];
3509 if (rxbuf->m_head != NULL) {
3510 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3511 BUS_DMASYNC_POSTREAD);
3512 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3513 if (rxbuf->m_head) {
/* Chain header to payload so one m_freem releases both */
3514 rxbuf->m_head->m_next = rxbuf->m_pack;
3515 m_freem(rxbuf->m_head);
3517 rxbuf->m_head = NULL;
3518 rxbuf->m_pack = NULL;
3522 /* Now refresh the mbufs */
3523 for (j = 0; j < adapter->num_rx_desc; j++) {
3524 if (ixgbe_get_buf(rxr, j, IXGBE_CLEAN_ALL) == ENOBUFS) {
3525 rxr->rx_buffers[j].m_head = NULL;
3526 rxr->rx_buffers[j].m_pack = NULL;
3527 rxr->rx_base[j].read.hdr_addr = 0;
3528 rxr->rx_base[j].read.pkt_addr = 0;
3533 /* Setup our descriptor indices */
3534 rxr->next_to_check = 0;
3535 rxr->last_cleaned = 0;
3536 rxr->lro_enabled = FALSE;
3537 rxr->hdr_split = FALSE;
3539 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3540 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
3543 ** Now set up the LRO interface, we
3544 ** also only do head split when LRO
3545 ** is enabled, since so often they
3546 ** are undesirable in similar setups.
*/
3548 if (ifp->if_capenable & IFCAP_LRO) {
3549 int err = tcp_lro_init(lro);
3551 INIT_DEBUGOUT("LRO Initialization failed!\n");
3554 INIT_DEBUGOUT("RX LRO Initialized\n");
3555 rxr->lro_enabled = TRUE;
3556 rxr->hdr_split = TRUE;
3557 lro->ifp = adapter->ifp;
/*
3564 * We need to clean up any buffers allocated
3565 * so far, 'j' is the failing index.
*/
3567 for (int i = 0; i < j; i++) {
3568 rxbuf = &rxr->rx_buffers[i];
3569 if (rxbuf->m_head != NULL) {
3570 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3571 BUS_DMASYNC_POSTREAD);
3572 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3573 m_freem(rxbuf->m_head);
3574 rxbuf->m_head = NULL;
3580 /*********************************************************************
3582 * Initialize all receive rings.
3584 **********************************************************************/
/*
 * ixgbe_setup_receive_structures:
 *   Initialize every RX ring via ixgbe_setup_receive_ring(). On a
 *   ring failure, frees the buffers of all rings that had already
 *   completed (the failing ring cleans up after itself).
 */
3586 ixgbe_setup_receive_structures(struct adapter *adapter)
3588 struct rx_ring *rxr = adapter->rx_rings;
3591 for (j = 0; j < adapter->num_queues; j++, rxr++)
3592 if (ixgbe_setup_receive_ring(rxr))
/*
3598 * Free RX buffers allocated so far, we will only handle
3599 * the rings that completed, the failing case will have
3600 * cleaned up for itself. 'j' failed, so it's the terminus.
*/
3602 for (int i = 0; i < j; ++i) {
3603 rxr = &adapter->rx_rings[i];
3604 for (int n = 0; n < adapter->num_rx_desc; n++) {
3605 struct ixgbe_rx_buf *rxbuf;
3606 rxbuf = &rxr->rx_buffers[n];
3607 if (rxbuf->m_head != NULL) {
3608 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3609 BUS_DMASYNC_POSTREAD);
3610 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3611 m_freem(rxbuf->m_head);
3612 rxbuf->m_head = NULL;
3620 /*********************************************************************
3622 * Setup receive registers and features.
3624 **********************************************************************/
3625 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
/*
** Programs the hardware receive units: disables RX while configuring,
** enables broadcast/pause-frame acceptance, sizes buffers for jumbo
** frames, writes per-queue descriptor base/len/head/tail and SRRCTL,
** then configures RSS (when multiqueue) and RX checksum offload.
*/
3628 ixgbe_initialize_receive_units(struct adapter *adapter)
3630 struct rx_ring *rxr = adapter->rx_rings;
3631 struct ixgbe_hw *hw = &adapter->hw;
3632 struct ifnet *ifp = adapter->ifp;
3633 u32 bufsz, rxctrl, fctrl, srrctl, rxcsum;
3634 u32 reta, mrqc = 0, hlreg, random[10];
3638 * Make sure receives are disabled while
3639 * setting up the descriptor ring
3641 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3642 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
3643 rxctrl & ~IXGBE_RXCTRL_RXEN);
3645 /* Enable broadcasts */
3646 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3647 fctrl |= IXGBE_FCTRL_BAM;
3648 fctrl |= IXGBE_FCTRL_DPF;
3649 fctrl |= IXGBE_FCTRL_PMCF;
3650 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3652 /* Set for Jumbo Frames? */
3653 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3654 if (ifp->if_mtu > ETHERMTU) {
3655 hlreg |= IXGBE_HLREG0_JUMBOEN;
3656 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3658 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3659 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3661 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Per-queue descriptor ring setup. */
3663 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3664 u64 rdba = rxr->rxdma.dma_paddr;
3666 /* Setup the Base and Length of the Rx Descriptor Ring */
3667 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
3668 (rdba & 0x00000000ffffffffULL));
3669 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
3670 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
3671 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3673 /* Set up the SRRCTL register */
3674 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
3675 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3676 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3678 if (rxr->hdr_split) {
3679 /* Use a standard mbuf for the header */
3680 srrctl |= ((IXGBE_RX_HDR <<
3681 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3682 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3683 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3684 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
3685 /* PSRTYPE must be initialized in 82599 */
3686 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3687 IXGBE_PSRTYPE_UDPHDR |
3688 IXGBE_PSRTYPE_IPV4HDR |
3689 IXGBE_PSRTYPE_IPV6HDR;
3690 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
/* No header split: single advanced descriptor buffer. */
3693 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3694 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
3696 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3697 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
3698 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
3701 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
/* Multiqueue: configure RSS redirection table, seeds and hash types. */
3704 if (adapter->num_queues > 1) {
3708 /* set up random bits */
3709 arc4rand(&random, sizeof(random), 0);
3711 /* Set up the redirection table */
3712 for (i = 0, j = 0; i < 128; i++, j++) {
3713 if (j == adapter->num_queues) j = 0;
3714 reta = (reta << 8) | (j * 0x11);
/* Every 4th entry: flush the accumulated 4-byte RETA word. */
3716 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3719 /* Now fill our hash function seeds */
3720 for (int i = 0; i < 10; i++)
3721 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
3723 /* Perform hash on these packet types */
3724 mrqc = IXGBE_MRQC_RSSEN
3725 | IXGBE_MRQC_RSS_FIELD_IPV4
3726 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3727 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
3728 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3729 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3730 | IXGBE_MRQC_RSS_FIELD_IPV6
3731 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3732 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
3733 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3734 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3736 /* RSS and RX IPP Checksum are mutually exclusive */
3737 rxcsum |= IXGBE_RXCSUM_PCSD;
3740 if (ifp->if_capenable & IFCAP_RXCSUM)
3741 rxcsum |= IXGBE_RXCSUM_PCSD;
/* Without PCSD, enable IP payload checksum instead. */
3743 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3744 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3746 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3751 /*********************************************************************
3753 * Free all receive rings.
3755 **********************************************************************/
/*
** Tears down every RX ring: frees its buffers, its LRO state
** (elided in this view) and the descriptor DMA memory, then
** releases the ring array itself.
*/
3757 ixgbe_free_receive_structures(struct adapter *adapter)
3759 struct rx_ring *rxr = adapter->rx_rings;
3761 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3762 struct lro_ctrl *lro = &rxr->lro;
3763 ixgbe_free_receive_buffers(rxr);
3764 /* Free LRO memory */
3766 /* Free the ring memory as well */
3767 ixgbe_dma_free(adapter, &rxr->rxdma);
3770 free(adapter->rx_rings, M_DEVBUF);
3773 /*********************************************************************
3775 * Free receive ring data structures
3777 **********************************************************************/
/*
** Releases one ring's per-descriptor resources: for each buffer,
** sync/unload/destroy its DMA map and free its mbuf; then free the
** buffer array and destroy the ring's DMA tag.
*/
3779 ixgbe_free_receive_buffers(struct rx_ring *rxr)
3781 struct adapter *adapter = NULL;
3782 struct ixgbe_rx_buf *rxbuf = NULL;
3784 INIT_DEBUGOUT("free_receive_buffers: begin");
3785 adapter = rxr->adapter;
3786 if (rxr->rx_buffers != NULL) {
3787 rxbuf = &rxr->rx_buffers[0];
3788 for (int i = 0; i < adapter->num_rx_desc; i++) {
3789 if (rxbuf->map != NULL) {
3790 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3791 BUS_DMASYNC_POSTREAD);
3792 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3793 bus_dmamap_destroy(rxr->rxtag, rxbuf->map);
3795 if (rxbuf->m_head != NULL) {
3796 m_freem(rxbuf->m_head);
/* NULL the pointer so a later free pass cannot double-free. */
3798 rxbuf->m_head = NULL;
3802 if (rxr->rx_buffers != NULL) {
3803 free(rxr->rx_buffers, M_DEVBUF);
3804 rxr->rx_buffers = NULL;
3806 if (rxr->rxtag != NULL) {
3807 bus_dma_tag_destroy(rxr->rxtag);
3813 /*********************************************************************
3815 * This routine executes in interrupt context. It replenishes
3816 * the mbufs in the descriptor and sends data which has been
3817 * dma'ed into host memory to upper layer.
3819 * We loop at most count times if count is > 0, or until done if
3822 * Return TRUE for more work, FALSE for all clean.
3823 *********************************************************************/
3825 ixgbe_rxeof(struct rx_ring *rxr, int count)
3827 struct adapter *adapter = rxr->adapter;
3828 struct ifnet *ifp = adapter->ifp;
3829 struct lro_ctrl *lro = &rxr->lro;
3830 struct lro_entry *queued;
3833 union ixgbe_adv_rx_desc *cur;
3837 i = rxr->next_to_check;
3838 cur = &rxr->rx_base[i];
3839 staterr = cur->wb.upper.status_error;
/* No descriptor done: nothing to clean, drop the lock and bail. */
3841 if (!(staterr & IXGBE_RXD_STAT_DD)) {
3842 IXGBE_RX_UNLOCK(rxr);
3847 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3848 BUS_DMASYNC_POSTREAD);
/* Main clean loop: one writeback descriptor per iteration. */
3850 while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
3851 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3852 struct mbuf *sendmp, *mh, *mp;
3853 u16 hlen, plen, hdr, vtag;
3854 u8 dopayload, accept_frame, eop;
3858 hlen = plen = vtag = 0;
3859 sendmp = mh = mp = NULL;
3861 /* Sync the buffers */
3862 bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[i].map,
3863 BUS_DMASYNC_POSTREAD);
3866 ** The way the hardware is configured to
3867 ** split, it will ONLY use the header buffer
3868 ** when header split is enabled, otherwise we
3869 ** get normal behavior, ie, both header and
3870 ** payload are DMA'd into the payload buffer.
3872 ** The fmp test is to catch the case where a
3873 ** packet spans multiple descriptors, in that
3874 ** case only the first header is valid.
3876 if ((rxr->hdr_split) && (rxr->fmp == NULL)){
3878 wb.lower.lo_dword.hs_rss.hdr_info);
3879 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3880 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
/* Clamp a bogus hardware header length to our header buffer size. */
3881 if (hlen > IXGBE_RX_HDR)
3882 hlen = IXGBE_RX_HDR;
3883 plen = le16toh(cur->wb.upper.length);
3884 /* Handle the header mbuf */
3885 mh = rxr->rx_buffers[i].m_head;
3887 dopayload = IXGBE_CLEAN_HDR;
3889 ** Get the payload length, this
3890 ** could be zero if its a small
3894 mp = rxr->rx_buffers[i].m_pack;
3897 mp->m_flags &= ~M_PKTHDR;
3899 mh->m_flags |= M_PKTHDR;
3900 dopayload = IXGBE_CLEAN_ALL;
3901 rxr->rx_split_packets++;
3902 } else { /* small packets */
3903 mh->m_flags &= ~M_PKTHDR;
3908 ** Either no header split, or a
3909 ** secondary piece of a fragmented
3912 mh = rxr->rx_buffers[i].m_pack;
3913 mh->m_flags |= M_PKTHDR;
3914 mh->m_len = le16toh(cur->wb.upper.length);
3915 dopayload = IXGBE_CLEAN_PKT;
3918 if (staterr & IXGBE_RXD_STAT_EOP) {
3924 #ifdef IXGBE_IEEE1588
3925 This code needs to be converted to work here
3926 -----------------------------------------------------
3927 if (unlikely(staterr & IXGBE_RXD_STAT_TS)) {
3930 // Create an mtag and set it up
3931 struct skb_shared_hwtstamps *shhwtstamps =
3934 rd32(IXGBE_TSYNCRXCTL) & IXGBE_TSYNCRXCTL_VALID),
3935 "igb: no RX time stamp available for time stamped packet");
3936 regval = rd32(IXGBE_RXSTMPL);
3937 regval |= (u64)rd32(IXGBE_RXSTMPH) << 32;
3938 // Do time conversion from the register
3939 ns = timecounter_cyc2time(&adapter->clock, regval);
3940 clocksync_update(&adapter->sync, ns);
3941 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
3942 shhwtstamps->hwtstamp = ns_to_ktime(ns);
3943 shhwtstamps->syststamp =
3944 clocksync_hw2sys(&adapter->sync, ns);
3948 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
3953 ** Save the vlan id, because get_buf will
3954 ** clobber the writeback descriptor...
3956 vtag = le16toh(cur->wb.upper.vlan);
3957 if (ixgbe_get_buf(rxr, i, dopayload) != 0) {
3961 /* Initial frame - setup */
3962 if (rxr->fmp == NULL) {
3963 mh->m_flags |= M_PKTHDR;
3964 mh->m_pkthdr.len = mh->m_len;
3965 rxr->fmp = mh; /* Store the first mbuf */
3967 if (mp) { /* Add payload if split */
3968 mh->m_pkthdr.len += mp->m_len;
3969 rxr->lmp = mh->m_next;
3972 /* Chain mbuf's together */
3973 mh->m_flags &= ~M_PKTHDR;
3974 rxr->lmp->m_next = mh;
3975 rxr->lmp = rxr->lmp->m_next;
3976 rxr->fmp->m_pkthdr.len += mh->m_len;
/* End of packet: finalize the chain and hand it up. */
3980 rxr->fmp->m_pkthdr.rcvif = ifp;
3983 /* capture data for AIM */
3984 rxr->bytes += rxr->fmp->m_pkthdr.len;
3985 rxr->rx_bytes += rxr->bytes;
3986 if (ifp->if_capenable & IFCAP_RXCSUM)
3987 ixgbe_rx_checksum(staterr, rxr->fmp);
3989 rxr->fmp->m_pkthdr.csum_flags = 0;
3990 if (staterr & IXGBE_RXD_STAT_VP) {
3991 rxr->fmp->m_pkthdr.ether_vtag = vtag;
3992 rxr->fmp->m_flags |= M_VLANTAG;
3994 #if __FreeBSD_version >= 800000
3995 rxr->fmp->m_pkthdr.flowid = curcpu;
3996 rxr->fmp->m_flags |= M_FLOWID;
4005 /* Reuse loaded DMA map and just update mbuf chain */
4007 mh = rxr->rx_buffers[i].m_head;
4011 mp = rxr->rx_buffers[i].m_pack;
/* Reset the payload mbuf to cover its full cluster again. */
4012 mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
4013 mp->m_data = mp->m_ext.ext_buf;
4015 if (adapter->max_frame_size <=
4016 (MCLBYTES - ETHER_ALIGN))
4017 m_adj(mp, ETHER_ALIGN);
4018 if (rxr->fmp != NULL) {
4019 /* handles the whole chain */
4026 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4027 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4029 rxr->last_cleaned = i; /* for updating tail */
4031 if (++i == adapter->num_rx_desc)
4035 ** Now send up to the stack,
4036 ** note that the value of next_to_check
4037 ** is safe because we keep the RX lock
4040 if (sendmp != NULL) {
4042 ** Send to the stack if:
4043 ** - LRO not enabled, or
4044 ** - no LRO resources, or
4045 ** - lro enqueue fails
4047 if ((!rxr->lro_enabled) ||
4048 ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0))))
4049 (*ifp->if_input)(ifp, sendmp);
4052 /* Get next descriptor */
4053 cur = &rxr->rx_base[i];
4054 staterr = cur->wb.upper.status_error;
4056 rxr->next_to_check = i;
4058 /* Advance the IXGB's Receive Queue "Tail Pointer" */
4059 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned);
4062 * Flush any outstanding LRO work
4064 while (!SLIST_EMPTY(&lro->lro_active)) {
4065 queued = SLIST_FIRST(&lro->lro_active);
4066 SLIST_REMOVE_HEAD(&lro->lro_active, next);
4067 tcp_lro_flush(lro, queued);
4070 IXGBE_RX_UNLOCK(rxr);
4073 ** Leaving with more to clean?
4074 ** then schedule another interrupt.
4076 if (staterr & IXGBE_RXD_STAT_DD) {
4077 ixgbe_rearm_rx_queues(adapter, (u64)(1 << rxr->msix));
4084 /*********************************************************************
4086 * Verify that the hardware indicated that the checksum is valid.
4087 * Inform the stack about the status of checksum so that stack
4088 * doesn't spend time verifying the checksum.
4090 *********************************************************************/
/*
** Translates the descriptor status/error bits into mbuf csum_flags:
** IP checksum (IPCS/IPE) and TCP/UDP L4 checksum (L4CS/TCPE).
** 'status' is the low 16 bits of staterr, 'errors' bits 31:24.
*/
4092 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp)
4094 u16 status = (u16) staterr;
4095 u8 errors = (u8) (staterr >> 24);
4097 if (status & IXGBE_RXD_STAT_IPCS) {
4099 if (!(errors & IXGBE_RXD_ERR_IPE)) {
4100 /* IP Checksum Good */
4101 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4102 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
/* IP checksum error: report nothing valid to the stack. */
4105 mp->m_pkthdr.csum_flags = 0;
4107 if (status & IXGBE_RXD_STAT_L4CS) {
4109 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
4110 mp->m_pkthdr.csum_flags |=
4111 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
/* Pseudo-header value the stack expects when DATA_VALID is set. */
4112 mp->m_pkthdr.csum_data = htons(0xffff);
4120 ** This routine is run via an vlan config EVENT,
4121 ** it enables us to use the HW Filter table since
4122 ** we can get the vlan id. This just creates the
4123 ** entry in the soft version of the VFTA, init will
4124 ** repopulate the real table.
4127 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4129 struct adapter *adapter = ifp->if_softc;
4132 if (ifp->if_softc != arg) /* Not our event */
4135 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
/* VFTA is an array of 32-bit words: word = vtag>>5, bit = vtag&31. */
4138 index = (vtag >> 5) & 0x7F;
4140 ixgbe_shadow_vfta[index] |= (1 << bit);
4141 ++adapter->num_vlans;
4142 /* Re-init to load the changes */
4143 ixgbe_init(adapter);
4147 ** This routine is run via an vlan
4148 ** unconfig EVENT, remove our entry
4149 ** in the soft vfta.
4152 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4154 struct adapter *adapter = ifp->if_softc;
4157 if (ifp->if_softc != arg)
4160 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
/* Clear the bit in the shadow VFTA; mirrors ixgbe_register_vlan(). */
4163 index = (vtag >> 5) & 0x7F;
4165 ixgbe_shadow_vfta[index] &= ~(1 << bit);
4166 --adapter->num_vlans;
4167 /* Re-init to load the changes */
4168 ixgbe_init(adapter);
/*
** Re-programs VLAN hardware state after a soft reset: repopulates
** the VFTA from the shadow copy and enables the VLAN filter.
** VLAN tag stripping (VME) is global on 82598 but per-queue
** (RXDCTL) on 82599.
*/
4172 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
4174 struct ixgbe_hw *hw = &adapter->hw;
4179 ** We get here thru init_locked, meaning
4180 ** a soft reset, this has already cleared
4181 ** the VFTA and other state, so if there
4182 ** have been no vlan's registered do nothing.
4184 if (adapter->num_vlans == 0)
4188 ** A soft reset zero's out the VFTA, so
4189 ** we need to repopulate it now.
4191 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
4192 if (ixgbe_shadow_vfta[i] != 0)
4193 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
4194 ixgbe_shadow_vfta[i]);
4196 /* Enable the Filter Table */
4197 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4198 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
4199 ctrl |= IXGBE_VLNCTRL_VFE;
4200 if (hw->mac.type == ixgbe_mac_82598EB)
4201 ctrl |= IXGBE_VLNCTRL_VME;
4202 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
4204 /* On 82599 the VLAN enable is per/queue in RXDCTL */
4205 if (hw->mac.type == ixgbe_mac_82599EB)
4206 for (int i = 0; i < adapter->num_queues; i++) {
4207 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
4208 ctrl |= IXGBE_RXDCTL_VME;
4209 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
/*
** Unmasks device interrupts: builds the EIMS mask (plus fan-failure
** and 82599-specific causes), sets up auto-clear for MSIX queue
** vectors (link stays manual-clear), then enables each RX/TX queue
** vector individually and flushes the writes.
*/
4214 ixgbe_enable_intr(struct adapter *adapter)
4216 struct ixgbe_hw *hw = &adapter->hw;
4217 struct tx_ring *txr = adapter->tx_rings;
4218 struct rx_ring *rxr = adapter->rx_rings;
4219 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
4222 /* Enable Fan Failure detection */
4223 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4224 mask |= IXGBE_EIMS_GPI_SDP1;
4226 /* 82599 specific interrupts */
4227 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4228 mask |= IXGBE_EIMS_ECC;
4229 mask |= IXGBE_EIMS_GPI_SDP1;
4230 mask |= IXGBE_EIMS_GPI_SDP2;
4233 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4235 /* With RSS we use auto clear */
4236 if (adapter->msix_mem) {
4237 mask = IXGBE_EIMS_ENABLE_MASK;
4238 /* Dont autoclear Link */
4239 mask &= ~IXGBE_EIMS_OTHER;
4240 mask &= ~IXGBE_EIMS_LSC;
4241 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4245 ** Now enable all queues, this is done separately to
4246 ** allow for handling the extended (beyond 32) MSIX
4247 ** vectors that can be used by 82599
4249 for (int i = 0; i < adapter->num_queues; i++, rxr++)
4250 ixgbe_enable_queue(adapter, rxr->msix);
4251 for (int i = 0; i < adapter->num_queues; i++, txr++)
4252 ixgbe_enable_queue(adapter, txr->msix);
4254 IXGBE_WRITE_FLUSH(hw);
/*
** Masks all device interrupts. Clears auto-clear first (MSIX),
** then writes EIMC; 82599 has extended mask registers EIMC_EX(0/1)
** for vectors beyond 32, while 82598 masks everything in one write.
*/
4260 ixgbe_disable_intr(struct adapter *adapter)
4262 if (adapter->msix_mem)
4263 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4264 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
4265 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
4267 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
4268 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
4269 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
4271 IXGBE_WRITE_FLUSH(&adapter->hw);
/* Read a 16-bit value from PCI config space via the osdep device handle. */
4276 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
4280 value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
/* Write a 16-bit value to PCI config space via the osdep device handle. */
4287 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
4289 pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
4296 ** Setup the correct IVAR register for a particular MSIX interrupt
4297 ** (yes this is all very magic and confusing :)
4298 ** - entry is the register array entry
4299 ** - vector is the MSIX vector for this queue
4300 ** - type is RX/TX/MISC
4303 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4305 struct ixgbe_hw *hw = &adapter->hw;
/* Mark the vector valid before programming it into the IVAR. */
4308 vector |= IXGBE_IVAR_ALLOC_VAL;
4310 switch (hw->mac.type) {
4312 case ixgbe_mac_82598EB:
/* type == -1 selects the "other causes" index on 82598. */
4314 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4316 entry += (type * 64);
/* Four 8-bit vector fields per 32-bit IVAR register. */
4317 index = (entry >> 2) & 0x1F;
4318 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4319 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4320 ivar |= (vector << (8 * (entry & 0x3)));
4321 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4324 case ixgbe_mac_82599EB:
4325 if (type == -1) { /* MISC IVAR */
4326 index = (entry & 1) * 8;
4327 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4328 ivar &= ~(0xFF << index);
4329 ivar |= (vector << index);
4330 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4331 } else { /* RX/TX IVARS */
4332 index = (16 * (entry & 1)) + (8 * type);
4333 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4334 ivar &= ~(0xFF << index);
4335 ivar |= (vector << index);
4336 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/*
** Maps each RX and TX queue's MSIX vector into the hardware IVAR
** tables (type 0 = RX, 1 = TX), then maps the link vector as MISC
** (type -1). See ixgbe_set_ivar() for the encoding.
*/
4345 ixgbe_configure_ivars(struct adapter *adapter)
4347 struct tx_ring *txr = adapter->tx_rings;
4348 struct rx_ring *rxr = adapter->rx_rings;
4350 for (int i = 0; i < adapter->num_queues; i++, rxr++)
4351 ixgbe_set_ivar(adapter, i, rxr->msix, 0);
4353 for (int i = 0; i < adapter->num_queues; i++, txr++)
4354 ixgbe_set_ivar(adapter, i, txr->msix, 1);
4356 /* For the Link interrupt */
4357 ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
4361 ** ixgbe_sfp_probe - called in the local timer to
4362 ** determine if a port had optics inserted.
/*
** Returns TRUE only when a newly inserted, supported SFP+ module is
** detected; unsupported modules disable further probing.
*/
4364 static bool ixgbe_sfp_probe(struct adapter *adapter)
4366 struct ixgbe_hw *hw = &adapter->hw;
4367 device_t dev = adapter->dev;
4368 bool result = FALSE;
4370 if ((hw->phy.type == ixgbe_phy_nl) &&
4371 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4372 s32 ret = hw->phy.ops.identify_sfp(hw);
4375 ret = hw->phy.ops.reset(hw);
4376 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4377 device_printf(dev,"Unsupported SFP+ module detected!");
4378 printf(" Reload driver with supported module.\n");
/* Stop probing: user must replace the module and reload. */
4379 adapter->sfp_probe = FALSE;
4382 device_printf(dev,"SFP+ module detected!\n");
4383 /* We now have supported optics */
4384 adapter->sfp_probe = FALSE;
4392 ** Tasklet handler for MSIX Link interrupts
4393 ** - do outside interrupt since it might sleep
4396 ixgbe_handle_link(void *context, int pending)
4398 struct adapter *adapter = context;
/* Query current link state, then propagate it to the ifnet layer. */
4400 ixgbe_check_link(&adapter->hw,
4401 &adapter->link_speed, &adapter->link_up, 0);
4402 ixgbe_update_link_status(adapter);
4406 ** Tasklet for handling SFP module interrupts
4409 ixgbe_handle_mod(void *context, int pending)
4411 struct adapter *adapter = context;
4412 struct ixgbe_hw *hw = &adapter->hw;
4413 device_t dev = adapter->dev;
4416 err = hw->phy.ops.identify_sfp(hw);
4417 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4419 "Unsupported SFP+ module type was detected.\n");
/* Supported module: configure it, then kick the MSF task to set speed. */
4422 hw->mac.ops.setup_sfp(hw);
4423 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
4429 ** Tasklet for handling MSF (multispeed fiber) interrupts
4432 ixgbe_handle_msf(void *context, int pending)
4434 struct adapter *adapter = context;
4435 struct ixgbe_hw *hw = &adapter->hw;
/* Guard the ops pointers: not every MAC provides these handlers. */
4438 if (hw->mac.ops.get_link_capabilities)
4439 hw->mac.ops.get_link_capabilities(hw, &autoneg,
4441 if (hw->mac.ops.setup_link_speed)
4442 hw->mac.ops.setup_link_speed(hw, autoneg, TRUE, TRUE);
4443 ixgbe_check_link(&adapter->hw,
4444 &adapter->link_speed, &adapter->link_up, 0);
4445 ixgbe_update_link_status(adapter);
4449 /**********************************************************************
4451 * Update the board statistics counters.
4453 **********************************************************************/
/*
** Accumulates hardware statistics registers (which clear on read)
** into the softc's stats struct, applies documented hardware
** workarounds (gprc counts missed packets; mprc counts broadcasts;
** flow-control frames inflate the TX good counts), then mirrors the
** totals into the ifnet counters.
*/
4455 ixgbe_update_stats_counters(struct adapter *adapter)
4457 struct ifnet *ifp = adapter->ifp;
4458 struct ixgbe_hw *hw = &adapter->hw;
4459 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4461 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
/* Per-packet-buffer missed/no-buffer counts (8 buffers). */
4463 for (int i = 0; i < 8; i++) {
4465 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
4467 adapter->stats.mpc[i] += mp;
4468 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
4471 /* Hardware workaround, gprc counts missed packets */
4472 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4473 adapter->stats.gprc -= missed_rx;
4475 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4476 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4477 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4480 * Workaround: mprc hardware is incorrectly counting
4481 * broadcasts, so for now we subtract those.
4483 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4484 adapter->stats.bprc += bprc;
4485 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4486 adapter->stats.mprc -= bprc;
4488 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4489 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4490 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4491 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4492 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4493 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4494 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4495 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4497 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4498 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4500 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4501 adapter->stats.lxontxc += lxon;
4502 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4503 adapter->stats.lxofftxc += lxoff;
/* Flow-control frames are counted as "good" TX; back them out. */
4504 total = lxon + lxoff;
4506 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4507 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4508 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4509 adapter->stats.gptc -= total;
4510 adapter->stats.mptc -= total;
4511 adapter->stats.ptc64 -= total;
4512 adapter->stats.gotc -= total * ETHER_MIN_LEN;
4514 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4515 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4516 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4517 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4518 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4519 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4520 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4521 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4522 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4523 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4526 /* Fill out the OS statistics structure */
4527 ifp->if_ipackets = adapter->stats.gprc;
4528 ifp->if_opackets = adapter->stats.gptc;
4529 ifp->if_ibytes = adapter->stats.gorc;
4530 ifp->if_obytes = adapter->stats.gotc;
4531 ifp->if_imcasts = adapter->stats.mprc;
4532 ifp->if_collisions = 0;
4535 ifp->if_ierrors = missed_rx + adapter->stats.crcerrs +
4536 adapter->stats.rlec;
4540 /**********************************************************************
4542 * This routine is called only when ixgbe_display_debug_stats is enabled.
4543 * This routine provides a way to take a look at important statistics
4544 * maintained by the driver and hardware.
4546 **********************************************************************/
/* Dump the accumulated driver/hardware statistics to the console. */
4548 ixgbe_print_hw_stats(struct adapter * adapter)
4550 device_t dev = adapter->dev;
4553 device_printf(dev,"Std Mbuf Failed = %lu\n",
4554 adapter->mbuf_defrag_failed);
4555 device_printf(dev,"Missed Packets = %llu\n",
4556 (long long)adapter->stats.mpc[0]);
4557 device_printf(dev,"Receive length errors = %llu\n",
4558 ((long long)adapter->stats.roc +
4559 (long long)adapter->stats.ruc));
4560 device_printf(dev,"Crc errors = %llu\n",
4561 (long long)adapter->stats.crcerrs);
4562 device_printf(dev,"Driver dropped packets = %lu\n",
4563 adapter->dropped_pkts);
4564 device_printf(dev, "watchdog timeouts = %ld\n",
4565 adapter->watchdog_events);
4567 device_printf(dev,"XON Rcvd = %llu\n",
4568 (long long)adapter->stats.lxonrxc);
4569 device_printf(dev,"XON Xmtd = %llu\n",
4570 (long long)adapter->stats.lxontxc);
4571 device_printf(dev,"XOFF Rcvd = %llu\n",
4572 (long long)adapter->stats.lxoffrxc);
4573 device_printf(dev,"XOFF Xmtd = %llu\n",
4574 (long long)adapter->stats.lxofftxc);
4576 device_printf(dev,"Total Packets Rcvd = %llu\n",
4577 (long long)adapter->stats.tpr);
4578 device_printf(dev,"Good Packets Rcvd = %llu\n",
4579 (long long)adapter->stats.gprc);
4580 device_printf(dev,"Good Packets Xmtd = %llu\n",
4581 (long long)adapter->stats.gptc);
4582 device_printf(dev,"TSO Transmissions = %lu\n",
4588 /**********************************************************************
4590 * This routine is called only when em_display_debug_stats is enabled.
4591 * This routine provides a way to take a look at important statistics
4592 * maintained by the driver and hardware.
4594 **********************************************************************/
/* Dump per-queue RX/TX state (HW head/tail, counters, LRO) to console. */
4596 ixgbe_print_debug_info(struct adapter *adapter)
4598 device_t dev = adapter->dev;
4599 struct rx_ring *rxr = adapter->rx_rings;
4600 struct tx_ring *txr = adapter->tx_rings;
4601 struct ixgbe_hw *hw = &adapter->hw;
4603 device_printf(dev,"Error Byte Count = %u \n",
4604 IXGBE_READ_REG(hw, IXGBE_ERRBC));
4606 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4607 struct lro_ctrl *lro = &rxr->lro;
4608 device_printf(dev,"Queue[%d]: rdh = %d, hw rdt = %d\n",
4609 i, IXGBE_READ_REG(hw, IXGBE_RDH(i)),
4610 IXGBE_READ_REG(hw, IXGBE_RDT(i)));
4611 device_printf(dev,"RX(%d) Packets Received: %lld\n",
4612 rxr->me, (long long)rxr->rx_packets);
4613 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
4614 rxr->me, (long long)rxr->rx_split_packets);
4615 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
4616 rxr->me, (long)rxr->rx_bytes);
4617 device_printf(dev,"RX(%d) IRQ Handled: %lu\n",
4618 rxr->me, (long)rxr->rx_irq);
4619 device_printf(dev,"RX(%d) LRO Queued= %d\n",
4620 rxr->me, lro->lro_queued);
4621 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
4622 rxr->me, lro->lro_flushed);
4625 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4626 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
4627 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
4628 IXGBE_READ_REG(hw, IXGBE_TDT(i)));
4629 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
4630 txr->me, (long)txr->total_packets);
4631 device_printf(dev,"TX(%d) IRQ Handled: %lu\n",
4632 txr->me, (long)txr->tx_irq);
4633 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
4634 txr->me, (long)txr->no_tx_desc_avail);
4637 device_printf(dev,"Link IRQ Handled: %lu\n",
4638 (long)adapter->link_irq);
/*
** Sysctl handler: any write triggers a dump of the hardware stats
** (the written value itself is only a trigger, see ixgbe_print_hw_stats).
*/
4643 ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS)
4647 struct adapter *adapter;
4650 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access or error: nothing more to do. */
4652 if (error || !req->newptr)
4656 adapter = (struct adapter *) arg1;
4657 ixgbe_print_hw_stats(adapter);
/*
** Sysctl handler: any write triggers a dump of the per-queue debug
** info (see ixgbe_print_debug_info).
*/
4663 ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS)
4666 struct adapter *adapter;
4669 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access or error: nothing more to do. */
4671 if (error || !req->newptr)
4675 adapter = (struct adapter *) arg1;
4676 ixgbe_print_debug_info(adapter);
4682 ** Set flow control using sysctl:
4683 ** Flow control values:
/*
** Validates the written value against the ixgbe_fc_* enum; anything
** unrecognized falls back to ixgbe_fc_none, then the setting is
** pushed to hardware via ixgbe_fc_enable().
*/
4690 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4693 struct adapter *adapter;
4695 error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);
4700 adapter = (struct adapter *) arg1;
4701 switch (ixgbe_flow_control) {
4702 case ixgbe_fc_rx_pause:
4703 case ixgbe_fc_tx_pause:
4705 adapter->hw.fc.requested_mode = ixgbe_flow_control;
/* Default: unknown value disables flow control. */
4709 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4712 ixgbe_fc_enable(&adapter->hw, 0);
/*
** Registers a writable integer sysctl under this device's tree to
** expose/tune the RX processing limit ('limit' points at the tunable,
** 'value' is its initial value).
*/
4717 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
4718 const char *description, int *limit, int value)
4721 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4722 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4723 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4726 #ifdef IXGBE_IEEE1588
4729 ** ixgbe_hwtstamp_ioctl - control hardware time stamping
4731 ** Outgoing time stamping can be enabled and disabled. Play nice and
4732 ** disable it when requested, although it shouldn't case any overhead
4733 ** when no packet needs it. At most one packet in the queue may be
4734 ** marked for time stamping, otherwise it would be impossible to tell
4735 ** for sure to which packet the hardware time stamp belongs.
4737 ** Incoming time stamping has to be configured via the hardware
4738 ** filters. Not all combinations are supported, in particular event
4739 ** type has to be specified. Matching the kind of event packet is
4740 ** not supported, with the exception of "all V2 events regardless of
/*
** NOTE(review): this whole region is compiled out unless
** IXGBE_IEEE1588 is defined; it appears ported from the Linux igb
** driver and uses structures (hwtstamp_ctrl) not defined in FreeBSD.
*/
4745 ixgbe_hwtstamp_ioctl(struct adapter *adapter, struct ifreq *ifr)
4747 struct ixgbe_hw *hw = &adapter->hw;
4748 struct hwtstamp_ctrl *config;
4749 u32 tsync_tx_ctl_bit = IXGBE_TSYNCTXCTL_ENABLED;
4750 u32 tsync_rx_ctl_bit = IXGBE_TSYNCRXCTL_ENABLED;
4751 u32 tsync_rx_ctl_type = 0;
4752 u32 tsync_rx_cfg = 0;
4755 u16 port = 319; /* PTP */
4758 config = (struct hwtstamp_ctrl *) ifr->ifr_data;
4760 /* reserved for future extensions */
4764 switch (config->tx_type) {
4765 case HWTSTAMP_TX_OFF:
4766 tsync_tx_ctl_bit = 0;
4768 case HWTSTAMP_TX_ON:
4769 tsync_tx_ctl_bit = IXGBE_TSYNCTXCTL_ENABLED;
4775 switch (config->rx_filter) {
4776 case HWTSTAMP_FILTER_NONE:
4777 tsync_rx_ctl_bit = 0;
4779 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4780 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
4781 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
4782 case HWTSTAMP_FILTER_ALL:
4784 * register TSYNCRXCFG must be set, therefore it is not
4785 * possible to time stamp both Sync and Delay_Req messages
4786 * => fall back to time stamping all packets
4788 tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_ALL;
4789 config->rx_filter = HWTSTAMP_FILTER_ALL;
4791 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4792 tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_L4_V1;
4793 tsync_rx_cfg = IXGBE_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
4796 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4797 tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_L4_V1;
4798 tsync_rx_cfg = IXGBE_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
4801 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4802 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4803 tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
4804 tsync_rx_cfg = IXGBE_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
4807 config->rx_filter = HWTSTAMP_FILTER_SOME;
4809 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
4810 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
4811 tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
4812 tsync_rx_cfg = IXGBE_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
4815 config->rx_filter = HWTSTAMP_FILTER_SOME;
4817 case HWTSTAMP_FILTER_PTP_V2_EVENT:
4818 case HWTSTAMP_FILTER_PTP_V2_SYNC:
4819 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
4820 tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
4821 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
4828 /* enable/disable TX */
4829 regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
4830 regval = (regval & ~IXGBE_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
4831 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval);
4833 /* enable/disable RX, define which PTP packets are time stamped */
4834 regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
4835 regval = (regval & ~IXGBE_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
4836 regval = (regval & ~0xE) | tsync_rx_ctl_type;
4837 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval);
4838 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCFG, tsync_rx_cfg);
4841 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
4842 * (Ethertype to filter on)
4843 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
4844 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
4846 IXGBE_WRITE_REG(hw, IXGBE_ETQF0, is_l2 ? 0x440088f7 : 0);
4848 /* L4 Queue Filter[0]: only filter by source and destination port */
4849 IXGBE_WRITE_REG(hw, IXGBE_SPQF0, htons(port));
4850 IXGBE_WRITE_REG(hw, IXGBE_IMIREXT(0), is_l4 ?
4851 ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
4852 IXGBE_WRITE_REG(hw, IXGBE_IMIR(0), is_l4 ?
4854 | (0<<16) /* immediate interrupt disabled */
4855 | 0 /* (1<<17) bit cleared: do not bypass
4856 destination port check */)
4858 IXGBE_WRITE_REG(hw, IXGBE_FTQF0, is_l4 ?
4860 | (1<<15) /* VF not compared */
4861 | (1<<27) /* Enable Timestamping */
4862 | (7<<28) /* only source port filter enabled,
4863 source/target address and protocol
4865 : ((1<<15) | (15<<28) /* all mask bits set = filter not
4870 adapter->hwtstamp_ctrl = config;
4872 /* clear TX/RX time stamp registers, just to be sure */
4873 regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
4874 regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
4880 ** ixgbe_read_clock - read raw cycle counter (to be used by time counter)
/*
** Reads the 64-bit SYSTIM cycle counter (low register first, which
** latches the high half on this hardware family).
** Fix: container_of() referred to 'struct igb_adapter' — a leftover
** from the Linux igb driver this code was ported from; this driver's
** softc is 'struct adapter' (the 'adapter =' assignment below would
** not even type-check against igb_adapter).
*/
4882 static cycle_t ixgbe_read_clock(const struct cyclecounter *tc)
4884 struct adapter *adapter =
4885 container_of(tc, struct adapter, cycles);
4886 struct ixgbe_hw *hw = &adapter->hw;
4889 stamp = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
4890 stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32ULL;
4895 #endif /* IXGBE_IEEE1588 */