1 /******************************************************************************
3 Copyright (c) 2001-2009, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
41 /*********************************************************************
42 * Set this to one to display debug statistics
43 *********************************************************************/
/* Non-zero enables extra statistics output via the "stats" sysctl path. */
44 int ixgbe_display_debug_stats = 0;
46 /*********************************************************************
 *  Driver version string; appended to the device description that
 *  ixgbe_probe() installs with device_set_desc_copy().
48 *********************************************************************/
49 char ixgbe_driver_version[] = "1.8.9";
51 /*********************************************************************
54 * Used by probe to select devices to load on
55 * Last field stores an index into ixgbe_strings
56 * Last entry must be all 0s
58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
59 *********************************************************************/
/*
** PCI device-ID match table walked by ixgbe_probe().  SubVendor/SubDevice
** of 0 act as wildcards; the fifth field indexes ixgbe_strings[].
** NOTE(review): IXGBE_DEV_ID_82598AT appears twice in this table (see
** below); the second entry is redundant — the first match wins.  Likely
** one of them was meant to be a different 82598 variant; verify against
** the supported-device list.
*/
61 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
/* NOTE(review): duplicate of the 82598AT entry above — dead entry. */
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
77 /* required last entry */
81 /*********************************************************************
82 * Table of branding strings
83 *********************************************************************/
/* Branding strings, indexed by the last field of ixgbe_vendor_info_array. */
85 static char *ixgbe_strings[] = {
86 "Intel(R) PRO/10GbE PCI-Express Network Driver"
89 /*********************************************************************
91 *********************************************************************/
/*
** Forward declarations for all file-local (static) functions.
** NOTE(review): this extract elides some lines — e.g. the #endif closing
** the __FreeBSD_version >= 800000 prototype group is not visible here;
** confirm it is present in the full file.
*/
92 static int ixgbe_probe(device_t);
93 static int ixgbe_attach(device_t);
94 static int ixgbe_detach(device_t);
95 static int ixgbe_shutdown(device_t);
96 static void ixgbe_start(struct ifnet *);
97 static void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
98 #if __FreeBSD_version >= 800000
99 static int ixgbe_mq_start(struct ifnet *, struct mbuf *);
100 static int ixgbe_mq_start_locked(struct ifnet *,
101 struct tx_ring *, struct mbuf *);
102 static void ixgbe_qflush(struct ifnet *);
104 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
105 static void ixgbe_watchdog(struct adapter *);
106 static void ixgbe_init(void *);
107 static void ixgbe_init_locked(struct adapter *);
108 static void ixgbe_stop(void *);
109 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
110 static int ixgbe_media_change(struct ifnet *);
111 static void ixgbe_identify_hardware(struct adapter *);
112 static int ixgbe_allocate_pci_resources(struct adapter *);
113 static int ixgbe_allocate_msix(struct adapter *);
114 static int ixgbe_allocate_legacy(struct adapter *);
115 static int ixgbe_allocate_queues(struct adapter *);
116 static int ixgbe_setup_msix(struct adapter *);
117 static void ixgbe_free_pci_resources(struct adapter *);
118 static void ixgbe_local_timer(void *);
119 static int ixgbe_hardware_init(struct adapter *);
120 static void ixgbe_setup_interface(device_t, struct adapter *);
/* Transmit path setup/teardown */
122 static int ixgbe_allocate_transmit_buffers(struct tx_ring *);
123 static int ixgbe_setup_transmit_structures(struct adapter *);
124 static void ixgbe_setup_transmit_ring(struct tx_ring *);
125 static void ixgbe_initialize_transmit_units(struct adapter *);
126 static void ixgbe_free_transmit_structures(struct adapter *);
127 static void ixgbe_free_transmit_buffers(struct tx_ring *);
/* Receive path setup/teardown */
129 static int ixgbe_allocate_receive_buffers(struct rx_ring *);
130 static int ixgbe_setup_receive_structures(struct adapter *);
131 static int ixgbe_setup_receive_ring(struct rx_ring *);
132 static void ixgbe_initialize_receive_units(struct adapter *);
133 static void ixgbe_free_receive_structures(struct adapter *);
134 static void ixgbe_free_receive_buffers(struct rx_ring *);
136 static void ixgbe_init_moderation(struct adapter *);
137 static void ixgbe_enable_intr(struct adapter *);
138 static void ixgbe_disable_intr(struct adapter *);
139 static void ixgbe_update_stats_counters(struct adapter *);
140 static bool ixgbe_txeof(struct tx_ring *);
141 static bool ixgbe_rxeof(struct rx_ring *, int);
142 static void ixgbe_rx_checksum(u32, struct mbuf *);
143 static void ixgbe_set_promisc(struct adapter *);
144 static void ixgbe_disable_promisc(struct adapter *);
145 static void ixgbe_set_multi(struct adapter *);
146 static void ixgbe_print_hw_stats(struct adapter *);
147 static void ixgbe_print_debug_info(struct adapter *);
148 static void ixgbe_update_link_status(struct adapter *);
149 static int ixgbe_get_buf(struct rx_ring *, int, u8);
150 static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
151 static int ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS);
152 static int ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS);
153 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
154 static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
155 struct ixgbe_dma_alloc *, int);
156 static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
157 static void ixgbe_add_rx_process_limit(struct adapter *, const char *,
158 const char *, int *, int);
159 static bool ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
160 static bool ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
161 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
162 static void ixgbe_configure_ivars(struct adapter *);
163 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN hardware-filter support (shadow VFTA maintenance) */
165 static void ixgbe_setup_vlan_hw_support(struct adapter *);
166 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
167 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
/* Adaptive interrupt moderation (AIM) per-RX-ring update */
169 static void ixgbe_update_aim(struct rx_ring *);
171 /* Support for pluggable optic modules */
172 static bool ixgbe_sfp_probe(struct adapter *);
174 /* Legacy (single vector interrupt handler */
175 static void ixgbe_legacy_irq(void *);
177 /* The MSI/X Interrupt handlers */
178 static void ixgbe_msix_tx(void *);
179 static void ixgbe_msix_rx(void *);
180 static void ixgbe_msix_link(void *);
182 /* Deferred interrupt tasklets */
183 static void ixgbe_handle_tx(void *, int);
184 static void ixgbe_handle_rx(void *, int);
185 static void ixgbe_handle_link(void *, int);
186 static void ixgbe_handle_msf(void *, int);
187 static void ixgbe_handle_mod(void *, int);
190 /*********************************************************************
191 * FreeBSD Device Interface Entry Points
192 *********************************************************************/
/*
** newbus glue: method table, driver_t, devclass and module registration.
** The driver attaches to the PCI bus as "ix"; adapter softc is allocated
** per-device by newbus (sizeof(struct adapter) below).
*/
194 static device_method_t ixgbe_methods[] = {
195 /* Device interface */
196 DEVMETHOD(device_probe, ixgbe_probe),
197 DEVMETHOD(device_attach, ixgbe_attach),
198 DEVMETHOD(device_detach, ixgbe_detach),
199 DEVMETHOD(device_shutdown, ixgbe_shutdown),
203 static driver_t ixgbe_driver = {
204 "ix", ixgbe_methods, sizeof(struct adapter),
207 static devclass_t ixgbe_devclass;
208 DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
/* Module load-order dependencies: require pci and ether. */
210 MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
211 MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
214 ** TUNEABLE PARAMETERS:
218 ** These parameters are used in Adaptive
219 ** Interrupt Moderation. The value is set
220 ** into EITR and controls the interrupt
221 ** frequency. They can be modified but
222 ** be careful in tuning them.
/* Adaptive Interrupt Moderation: latency values loaded into EITR. */
224 static int ixgbe_enable_aim = TRUE;
225 TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
226 static int ixgbe_low_latency = IXGBE_LOW_LATENCY;
227 TUNABLE_INT("hw.ixgbe.low_latency", &ixgbe_low_latency);
228 static int ixgbe_ave_latency = IXGBE_AVE_LATENCY;
229 TUNABLE_INT("hw.ixgbe.ave_latency", &ixgbe_ave_latency);
230 static int ixgbe_bulk_latency = IXGBE_BULK_LATENCY;
231 TUNABLE_INT("hw.ixgbe.bulk_latency", &ixgbe_bulk_latency);
233 /* How many packets rxeof tries to clean at a time */
234 static int ixgbe_rx_process_limit = 100;
235 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
237 /* Flow control setting, default to full */
238 static int ixgbe_flow_control = ixgbe_fc_full;
239 TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
/* MSIX on by default; tunable allows forcing legacy interrupts. */
245 static int ixgbe_enable_msix = 1;
246 TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
/*
** Header split default.  NOTE(review): declared bool but registered via
** TUNABLE_INT — TUNABLE_INT writes a full int through the pointer; verify
** sizeof(bool) == sizeof(int) here or this can stomp adjacent memory.
*/
254 static bool ixgbe_header_split = TRUE;
255 TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);
/* 0 = autoconfigure queue count to ncpus; each queue is an RX/TX pair. */
263 static int ixgbe_num_queues = 0;
264 TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
266 /* Number of TX descriptors per ring */
267 static int ixgbe_txd = DEFAULT_TXD;
268 TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
270 /* Number of RX descriptors per ring */
271 static int ixgbe_rxd = DEFAULT_RXD;
272 TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
274 /* Total number of Interfaces - need for config sanity check */
275 static int ixgbe_total_ports;
/*
** Shadow VFTA table: mirrors the hardware VLAN filter table, which is
** cleared on soft reset and must be repopulated from this copy.
*/
282 static u32 ixgbe_shadow_vfta[IXGBE_VFTA_SIZE];
/* Scatter-gather segment count; bumped to the 82599 value in attach. */
289 static int ixgbe_num_segs = IXGBE_82598_SCATTER;
291 /*********************************************************************
292 * Device identification routine
294 * ixgbe_probe determines if the driver should be loaded on
295 * adapter based on PCI vendor/device id of the adapter.
297 * return 0 on success, positive on failure
298 *********************************************************************/
/*
** Device identification: match the PCI vendor/device (and optionally
** subvendor/subdevice) IDs against ixgbe_vendor_info_array and, on a hit,
** install the branding string + driver version as the device description.
** Returns BUS_PROBE_* success / ENXIO on no match (return lines elided
** in this extract).
** NOTE(review): sprintf() into adapter_name[256] is bounded only by the
** table strings' lengths — snprintf() would be the defensive choice.
*/
301 ixgbe_probe(device_t dev)
303 ixgbe_vendor_info_t *ent;
305 u16 pci_vendor_id = 0;
306 u16 pci_device_id = 0;
307 u16 pci_subvendor_id = 0;
308 u16 pci_subdevice_id = 0;
309 char adapter_name[256];
311 INIT_DEBUGOUT("ixgbe_probe: begin");
313 pci_vendor_id = pci_get_vendor(dev);
314 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
317 pci_device_id = pci_get_device(dev);
318 pci_subvendor_id = pci_get_subvendor(dev);
319 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the match table; subvendor/subdevice of 0 are wildcards. */
321 ent = ixgbe_vendor_info_array;
322 while (ent->vendor_id != 0) {
323 if ((pci_vendor_id == ent->vendor_id) &&
324 (pci_device_id == ent->device_id) &&
326 ((pci_subvendor_id == ent->subvendor_id) ||
327 (ent->subvendor_id == 0)) &&
329 ((pci_subdevice_id == ent->subdevice_id) ||
330 (ent->subdevice_id == 0))) {
331 sprintf(adapter_name, "%s, Version - %s",
332 ixgbe_strings[ent->index],
333 ixgbe_driver_version);
334 device_set_desc_copy(dev, adapter_name);
343 /*********************************************************************
344 * Device initialization routine
346 * The attach entry point is called when the driver is being loaded.
347 * This routine identifies the type of hardware, allocates all resources
348 * and initializes the hardware.
350 * return 0 on success, positive on failure
351 *********************************************************************/
/*
** Device attach: identify hardware/optics, map PCI resources, size and
** allocate the TX/RX rings, init shared code + hardware, set up the
** interrupt scheme (MSI-X or legacy), create the ifnet, and register
** VLAN event handlers.  Returns 0 on success, positive errno on failure
** (error paths and `break` statements are elided in this extract).
*/
354 ixgbe_attach(device_t dev)
356 struct adapter *adapter;
362 INIT_DEBUGOUT("ixgbe_attach: begin");
364 /* Allocate, clear, and link in our adapter structure */
365 adapter = device_get_softc(dev);
366 adapter->dev = adapter->osdep.dev = dev;
370 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
372 /* Keep track of optics */
/* NOTE(review): per-case break statements are not visible in this
** extract; confirm each case breaks in the full file or the optics
** assignments fall through. */
373 pci_device_id = pci_get_device(dev);
374 switch (pci_device_id) {
375 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT :
376 case IXGBE_DEV_ID_82598EB_CX4 :
377 adapter->optics = IFM_10G_CX4;
379 case IXGBE_DEV_ID_82598AF_DUAL_PORT :
380 case IXGBE_DEV_ID_82598_DA_DUAL_PORT :
381 case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
382 case IXGBE_DEV_ID_82598AT :
383 adapter->optics = IFM_10G_SR;
385 case IXGBE_DEV_ID_82598EB_XF_LR :
386 adapter->optics = IFM_10G_LR;
388 case IXGBE_DEV_ID_82599_SFP :
389 adapter->optics = IFM_10G_SR;
390 ixgbe_num_segs = IXGBE_82599_SCATTER;
392 case IXGBE_DEV_ID_82599_KX4 :
393 adapter->optics = IFM_10G_CX4;
394 ixgbe_num_segs = IXGBE_82599_SCATTER;
396 case IXGBE_DEV_ID_82599_XAUI_LOM :
397 ixgbe_num_segs = IXGBE_82599_SCATTER;
/* Per-device sysctl nodes: stats/debug dumps and flow control. */
403 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
404 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
405 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
406 adapter, 0, ixgbe_sysctl_stats, "I", "Statistics");
408 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
409 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
410 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
411 adapter, 0, ixgbe_sysctl_debug, "I", "Debug Info");
413 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
414 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
415 OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
416 adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
/* AIM knobs exposed as plain INTs (driver-global, not per-device). */
418 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
419 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
420 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
421 &ixgbe_enable_aim, 1, "Interrupt Moderation");
423 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
424 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
425 OID_AUTO, "low_latency", CTLTYPE_INT|CTLFLAG_RW,
426 &ixgbe_low_latency, 1, "Low Latency");
428 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
429 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
430 OID_AUTO, "ave_latency", CTLTYPE_INT|CTLFLAG_RW,
431 &ixgbe_ave_latency, 1, "Average Latency");
433 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
434 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
435 OID_AUTO, "bulk_latency", CTLTYPE_INT|CTLFLAG_RW,
436 &ixgbe_bulk_latency, 1, "Bulk Latency");
438 /* Set up the timer callout */
439 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
441 /* Determine hardware revision */
442 ixgbe_identify_hardware(adapter);
444 /* Do base PCI setup - map BAR0 */
445 if (ixgbe_allocate_pci_resources(adapter)) {
446 device_printf(dev, "Allocation of PCI resources failed\n");
451 /* Do descriptor calc and sanity checks */
452 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
453 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
454 device_printf(dev, "TXD config issue, using default!\n");
455 adapter->num_tx_desc = DEFAULT_TXD;
457 adapter->num_tx_desc = ixgbe_txd;
/*
** RX descriptors vs. system mbuf-cluster budget sanity check.
*/
464 if (nmbclusters > 0 ) {
466 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
467 if (s > nmbclusters) {
468 device_printf(dev, "RX Descriptors exceed "
469 "system mbuf max, using default instead!\n");
470 ixgbe_rxd = DEFAULT_RXD;
/* NOTE(review): BUG — the RXD range check below uses MIN_TXD/MAX_TXD;
** it should almost certainly be MIN_RXD/MAX_RXD. */
474 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
475 ixgbe_rxd < MIN_TXD || ixgbe_rxd > MAX_TXD) {
476 device_printf(dev, "RXD config issue, using default!\n");
477 adapter->num_rx_desc = DEFAULT_RXD;
479 adapter->num_rx_desc = ixgbe_rxd;
481 /* Allocate our TX/RX Queues */
482 if (ixgbe_allocate_queues(adapter)) {
487 /* Initialize the shared code */
488 error = ixgbe_init_shared_code(hw);
489 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
/* No optics yet: let the timer routine probe for later insertion. */
495 adapter->sfp_probe = TRUE;
497 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
498 device_printf(dev,"Unsupported SFP+ module detected!\n");
502 device_printf(dev,"Unable to initialize the shared code\n");
507 /* Initialize the hardware */
508 if (ixgbe_hardware_init(adapter)) {
509 device_printf(dev,"Unable to initialize the hardware\n");
/* Prefer MSI-X when more than one vector was granted and it is enabled. */
514 if ((adapter->msix > 1) && (ixgbe_enable_msix))
515 error = ixgbe_allocate_msix(adapter);
517 error = ixgbe_allocate_legacy(adapter);
521 /* Setup OS specific network interface */
522 ixgbe_setup_interface(dev, adapter);
524 #ifdef IXGBE_IEEE1588
/* IEEE 1588 (PTP) cycle counter setup — marked incomplete by author. */
528 adapter->cycles.read = ixgbe_read_clock;
529 adapter->cycles.mask = (u64)-1;
530 adapter->cycles.mult = 1;
531 adapter->cycles.shift = IXGBE_TSYNC_SHIFT;
532 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TIMINCA, (1<<24) |
533 IXGBE_TSYNC_CYCLE_TIME * IXGBE_TSYNC_SHIFT;
534 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYSTIML, 0x00000000);
535 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYSTIMH, 0xFF800000);
537 // JFV - this is not complete yet
540 /* Sysctl for limiting the amount of work done in the taskqueue */
541 ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
542 "max number of rx packets to process", &adapter->rx_process_limit,
543 ixgbe_rx_process_limit);
545 /* Initialize statistics */
546 ixgbe_update_stats_counters(adapter);
548 /* Register for VLAN events */
549 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
550 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
551 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
552 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
554 /* let hardware know driver is loaded */
555 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
556 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
557 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
559 INIT_DEBUGOUT("ixgbe_attach: end");
/* Error-unwind path: free rings and PCI resources. */
562 ixgbe_free_transmit_structures(adapter);
563 ixgbe_free_receive_structures(adapter);
565 ixgbe_free_pci_resources(adapter);
570 /*********************************************************************
571 * Device removal routine
573 * The detach entry point is called when the driver is being removed.
574 * This routine stops the adapter and deallocates all the resources
575 * that were allocated for driver operation.
577 * return 0 on success, positive on failure
578 *********************************************************************/
/*
** Device detach: refuse if VLANs are attached, stop the adapter, drain
** and free the per-queue and link taskqueues, clear DRV_LOAD so firmware
** knows the driver is gone, unhook VLAN events, tear down the ifnet and
** free all ring/PCI resources.  Returns 0 on success (return statements
** elided in this extract).
*/
581 ixgbe_detach(device_t dev)
583 struct adapter *adapter = device_get_softc(dev);
584 struct tx_ring *txr = adapter->tx_rings;
585 struct rx_ring *rxr = adapter->rx_rings;
588 INIT_DEBUGOUT("ixgbe_detach: begin");
590 /* Make sure VLANS are not using driver */
591 if (adapter->ifp->if_vlantrunk != NULL) {
592 device_printf(dev,"Vlan in use, detach first\n");
/* Stop the adapter under the core lock (stop call elided in extract). */
596 IXGBE_CORE_LOCK(adapter);
598 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free the per-TX-ring deferred-start taskqueues. */
600 for (int i = 0; i < adapter->num_queues; i++, txr++) {
602 taskqueue_drain(txr->tq, &txr->tx_task);
603 taskqueue_free(txr->tq);
/* Drain and free the per-RX-ring taskqueues. */
607 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
609 taskqueue_drain(rxr->tq, &rxr->rx_task);
610 taskqueue_free(rxr->tq);
614 /* Drain the Link queue */
616 taskqueue_drain(adapter->tq, &adapter->link_task);
617 taskqueue_drain(adapter->tq, &adapter->mod_task);
618 taskqueue_drain(adapter->tq, &adapter->msf_task);
619 taskqueue_free(adapter->tq);
622 /* let hardware know driver is unloading */
623 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
624 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
625 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
627 /* Unregister VLAN events */
628 if (adapter->vlan_attach != NULL)
629 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
630 if (adapter->vlan_detach != NULL)
631 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
633 ether_ifdetach(adapter->ifp);
634 callout_drain(&adapter->timer);
635 ixgbe_free_pci_resources(adapter);
636 bus_generic_detach(dev);
637 if_free(adapter->ifp);
639 ixgbe_free_transmit_structures(adapter);
640 ixgbe_free_receive_structures(adapter);
642 IXGBE_CORE_LOCK_DESTROY(adapter);
646 /*********************************************************************
648 * Shutdown entry point
650 **********************************************************************/
/*
** System-shutdown hook: quiesce the adapter under the core lock.
** (The ixgbe_stop() call between lock/unlock is elided in this extract.)
*/
653 ixgbe_shutdown(device_t dev)
655 struct adapter *adapter = device_get_softc(dev);
656 IXGBE_CORE_LOCK(adapter);
658 IXGBE_CORE_UNLOCK(adapter);
663 /*********************************************************************
664 * Transmit entry point
666 * ixgbe_start is called by the stack to initiate a transmit.
667 * The driver will remain in this routine as long as there are
668 * packets to transmit and transmit resources are available.
669 * In case resources are not available stack is notified and
670 * the packet is requeued.
671 **********************************************************************/
/*
** Dequeue frames from the ifnet send queue and hand them to the hardware
** while the interface is RUNNING, not OACTIVE, and link is up.  On an
** ixgbe_xmit() failure the frame is re-prepended and OACTIVE is set so
** the stack stops feeding us.  Caller must hold the TX ring lock.
*/
674 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
677 struct adapter *adapter = txr->adapter;
679 IXGBE_TX_LOCK_ASSERT(txr);
681 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
684 if (!adapter->link_active)
687 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
689 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
693 if (ixgbe_xmit(txr, &m_head)) {
696 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
697 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
700 /* Send a copy of the frame to the BPF listener */
701 ETHER_BPF_MTAP(ifp, m_head);
703 /* Set timeout in case hardware has problems transmitting */
704 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
711 * Legacy TX start - called by the stack, this
712 * always uses the first tx ring, and should
713 * not be used with multiqueue tx enabled.
/*
** Legacy if_start entry point: always uses the first TX ring; must not
** be used when multiqueue transmit (if_transmit) is enabled.  (The
** IXGBE_TX_LOCK() call before start_locked is elided in this extract.)
*/
716 ixgbe_start(struct ifnet *ifp)
718 struct adapter *adapter = ifp->if_softc;
719 struct tx_ring *txr = adapter->tx_rings;
721 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
723 ixgbe_start_locked(txr, ifp);
724 IXGBE_TX_UNLOCK(txr);
729 #if __FreeBSD_version >= 800000
/*
** Multiqueue transmit (if_transmit) path: pick a TX ring from the mbuf
** flowid when present, try the ring lock, and either transmit directly
** or enqueue on the ring's buf_ring for the deferred task.
** NOTE(review): when M_FLOWID is not set, `i` is used as declared — its
** initializer is not visible in this extract; confirm it defaults to 0.
*/
735 ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
737 struct adapter *adapter = ifp->if_softc;
741 /* Which queue to use */
742 if ((m->m_flags & M_FLOWID) != 0)
743 i = m->m_pkthdr.flowid % adapter->num_queues;
744 txr = &adapter->tx_rings[i];
746 if (IXGBE_TX_TRYLOCK(txr)) {
747 err = ixgbe_mq_start_locked(ifp, txr, m);
748 IXGBE_TX_UNLOCK(txr);
750 err = drbr_enqueue(ifp, txr->br, m);
/*
** Locked multiqueue transmit: fast-path the frame when the buf_ring is
** empty, otherwise enqueue and then drain the ring.  m == NULL means we
** were called from the deferred tasklet just to drain.
*/
756 ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
758 struct adapter *adapter = txr->adapter;
762 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
763 err = drbr_enqueue(ifp, txr->br, m);
767 if (m == NULL) /* Called by tasklet */
770 /* If nothing queued go right to xmit */
771 if (drbr_empty(ifp, txr->br)) {
772 if (ixgbe_xmit(txr, &m)) {
773 if (m && (err = drbr_enqueue(ifp, txr->br, m)) != 0)
776 /* Success, update stats */
/* NOTE(review): `m` is dereferenced here after ixgbe_xmit() took
** ownership via &m — ixgbe_xmit may have replaced or consumed the
** chain; verify this is not a use-after-free in the full file. */
777 drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
778 /* Send a copy of the frame to the BPF listener */
779 ETHER_BPF_MTAP(ifp, m);
780 /* Set the watchdog */
781 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
784 } else if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
788 if (drbr_empty(ifp, txr->br))
791 /* Process the queue */
793 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
795 next = drbr_dequeue(ifp, txr->br);
798 if (ixgbe_xmit(txr, &next))
800 ETHER_BPF_MTAP(ifp, next);
801 /* Set the watchdog */
802 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
/* Throttle the stack when the ring is nearly full. */
805 if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD)
806 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/*
** if_qflush: discard every mbuf still queued on each ring's buf_ring.
** (The IXGBE_TX_LOCK() and m_freem() lines are elided in this extract.)
*/
815 ixgbe_qflush(struct ifnet *ifp)
817 struct adapter *adapter = ifp->if_softc;
818 struct tx_ring *txr = adapter->tx_rings;
821 for (int i = 0; i < adapter->num_queues; i++, txr++) {
823 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
825 IXGBE_TX_UNLOCK(txr);
829 #endif /* __FreeBSD_version >= 800000 */
831 /*********************************************************************
834 * ixgbe_ioctl is called when the user wants to configure the
837 * return 0 on success, positive on failure
838 **********************************************************************/
/*
** ifnet ioctl handler: address assignment, MTU, interface flags,
** multicast membership, media, and capability toggles.  Unknown
** commands fall through to ether_ioctl().  (case labels / break
** statements between branches are elided in this extract.)
*/
841 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
843 struct adapter *adapter = ifp->if_softc;
844 struct ifreq *ifr = (struct ifreq *) data;
846 struct ifaddr *ifa = (struct ifaddr *) data;
853 IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
/* AF_INET addresses short-circuit ether_ioctl: bring the interface
** up ourselves to avoid re-initialization side effects. */
854 if (ifa->ifa_addr->sa_family == AF_INET) {
855 ifp->if_flags |= IFF_UP;
856 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
857 IXGBE_CORE_LOCK(adapter);
858 ixgbe_init_locked(adapter);
859 IXGBE_CORE_UNLOCK(adapter);
861 arp_ifinit(ifp, ifa);
864 ether_ioctl(ifp, command, data);
867 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
868 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
871 IXGBE_CORE_LOCK(adapter);
872 ifp->if_mtu = ifr->ifr_mtu;
873 adapter->max_frame_size =
874 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
875 ixgbe_init_locked(adapter);
876 IXGBE_CORE_UNLOCK(adapter);
880 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
881 IXGBE_CORE_LOCK(adapter);
882 if (ifp->if_flags & IFF_UP) {
883 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only PROMISC/ALLMULTI changed: reprogram filters, no reinit. */
884 if ((ifp->if_flags ^ adapter->if_flags) &
885 (IFF_PROMISC | IFF_ALLMULTI)) {
886 ixgbe_disable_promisc(adapter);
887 ixgbe_set_promisc(adapter);
890 ixgbe_init_locked(adapter);
892 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
894 adapter->if_flags = ifp->if_flags;
895 IXGBE_CORE_UNLOCK(adapter);
899 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
/* Reload the multicast filter with interrupts masked. */
900 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
901 IXGBE_CORE_LOCK(adapter);
902 ixgbe_disable_intr(adapter);
903 ixgbe_set_multi(adapter);
904 ixgbe_enable_intr(adapter);
905 IXGBE_CORE_UNLOCK(adapter);
910 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
911 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* Capability toggles: XOR only the bits the caller changed. */
915 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
916 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
917 if (mask & IFCAP_HWCSUM)
918 ifp->if_capenable ^= IFCAP_HWCSUM;
919 if (mask & IFCAP_TSO4)
920 ifp->if_capenable ^= IFCAP_TSO4;
921 if (mask & IFCAP_LRO)
922 ifp->if_capenable ^= IFCAP_LRO;
923 if (mask & IFCAP_VLAN_HWTAGGING)
924 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
925 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
927 VLAN_CAPABILITIES(ifp);
931 #ifdef IXGBE_IEEE1588
/* Precision Time Protocol (IEEE 1588) ioctl pass-through. */
936 error = ixgbe_hwtstamp_ioctl(adapter, ifp);
941 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
942 error = ether_ioctl(ifp, command, data);
949 /*********************************************************************
950 * Watchdog entry point
952 * This routine is called by the local timer
953 * to detect hardware hangs .
955 **********************************************************************/
/*
** Watchdog, run from the local timer with the core lock held: per ring,
** decrement the TX timer and declare a hang when it expires with work
** still outstanding (TDH != TDT).  If the MAC is paused by flow control
** the timers are simply re-armed; a genuine hang dumps ring state,
** clears IFF_DRV_RUNNING and re-initializes the adapter.
*/
958 ixgbe_watchdog(struct adapter *adapter)
960 device_t dev = adapter->dev;
961 struct tx_ring *txr = adapter->tx_rings;
962 struct ixgbe_hw *hw = &adapter->hw;
963 bool tx_hang = FALSE;
965 IXGBE_CORE_LOCK_ASSERT(adapter);
/*
** The timer is set to IXGBE_TX_TIMEOUT whenever a packet is queued and
** refreshed by txeof() while cleaning makes progress; it only reaches
** zero here when the ring has stalled.
*/
974 for (int i = 0; i < adapter->num_queues; i++, txr++) {
/* Timer inactive (0) or still counting down: ring is healthy. */
978 if (txr->watchdog_timer == 0 || --txr->watchdog_timer) {
979 IXGBE_TX_UNLOCK(txr);
982 head = IXGBE_READ_REG(hw, IXGBE_TDH(i));
983 tail = IXGBE_READ_REG(hw, IXGBE_TDT(i));
984 if (head == tail) { /* last minute check */
985 IXGBE_TX_UNLOCK(txr);
988 /* Well, seems something is really hung */
990 IXGBE_TX_UNLOCK(txr);
994 if (tx_hang == FALSE)
/*
** If we are here because of pause frames, don't reset the hardware —
** just re-arm every ring's timer and wait the pause out.
*/
1001 if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
1002 txr = adapter->tx_rings; /* reset pointer */
1003 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1005 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
1006 IXGBE_TX_UNLOCK(txr);
/* Genuine hang: log ring state and reinitialize. */
1012 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1013 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1014 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
1015 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
1016 IXGBE_READ_REG(hw, IXGBE_TDT(i)));
1017 device_printf(dev,"TX(%d) desc avail = %d,"
1018 "Next TX to Clean = %d\n",
1019 i, txr->tx_avail, txr->next_tx_to_clean);
1021 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1022 adapter->watchdog_events++;
1024 ixgbe_init_locked(adapter);
1027 /*********************************************************************
1030 * This routine is used in two ways. It is used by the stack as
1031 * init entry point in network interface structure. It is also used
1032 * by the driver as a hw/sw initialization routine to get to a
1035 * return 0 on success, positive on failure
1036 **********************************************************************/
1037 #define IXGBE_MHADD_MFS_SHIFT 16
/*
 * NOTE(review): this extract embeds original source line numbers and
 * omits intermediate lines, so the body below is not contiguous code.
 * Caller must hold the core lock (enforced by the mtx_assert below).
 */
1040 ixgbe_init_locked(struct adapter *adapter)
1042 struct ifnet *ifp = adapter->ifp;
1043 device_t dev = adapter->dev;
/* NOTE(review): hw is presumably assigned &adapter->hw on a line missing from this extract -- verify */
1044 struct ixgbe_hw *hw;
1045 u32 k, txdctl, mhadd, gpie;
1049 INIT_DEBUGOUT("ixgbe_init: begin");
1052 mtx_assert(&adapter->core_mtx, MA_OWNED);
/* Quiesce the adapter before reprogramming it */
1054 ixgbe_stop(adapter);
1056 /* Get the latest mac address, User can use a LAA */
1057 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1058 IXGBE_ETH_LENGTH_OF_ADDRESS);
1059 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, 1);
1060 adapter->hw.addr_ctrl.rar_used_count = 1;
1062 /* Initialize the hardware */
1063 if (ixgbe_hardware_init(adapter)) {
1064 device_printf(dev, "Unable to initialize the hardware\n");
1068 /* Prepare transmit descriptors and buffers */
1069 if (ixgbe_setup_transmit_structures(adapter)) {
1070 device_printf(dev,"Could not setup transmit structures\n");
1071 ixgbe_stop(adapter);
1075 ixgbe_initialize_transmit_units(adapter);
1077 /* Setup Multicast table */
1078 ixgbe_set_multi(adapter);
1081 ** Determine the correct mbuf pool
1082 ** for doing jumbo/headersplit
1084 if (ifp->if_mtu > ETHERMTU)
1085 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1087 adapter->rx_mbuf_sz = MCLBYTES;
1089 /* Prepare receive descriptors and buffers */
1090 if (ixgbe_setup_receive_structures(adapter)) {
1091 device_printf(dev,"Could not setup receive structures\n");
1092 ixgbe_stop(adapter);
1096 /* Configure RX settings */
1097 ixgbe_initialize_receive_units(adapter);
1099 /* Configure Interrupt Moderation */
1100 ixgbe_init_moderation(adapter);
1102 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
/* 82599: route the SDP1/SDP2 pins to interrupt causes */
1104 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1105 gpie |= IXGBE_SDP1_GPIEN;
1106 gpie |= IXGBE_SDP2_GPIEN;
1109 /* Enable Fan Failure Interrupt */
1110 if (hw->device_id == IXGBE_DEV_ID_82598AT)
1111 gpie |= IXGBE_SDP1_GPIEN;
1113 if (adapter->msix > 1) {
1114 /* Enable Enhanced MSIX mode */
1115 gpie |= IXGBE_GPIE_MSIX_MODE;
1116 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1119 IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
1121 /* Set the various hardware offload abilities */
1122 ifp->if_hwassist = 0;
1123 if (ifp->if_capenable & IFCAP_TSO4)
1124 ifp->if_hwassist |= CSUM_TSO;
1125 if (ifp->if_capenable & IFCAP_TXCSUM)
/* NOTE(review): plain '=' discards the CSUM_TSO bit set just above; '|=' looks intended -- confirm */
1126 ifp->if_hwassist = (CSUM_TCP | CSUM_UDP);
/* Program the max frame size register when running jumbo frames */
1129 if (ifp->if_mtu > ETHERMTU) {
1130 mhadd = IXGBE_READ_REG(&adapter->hw, IXGBE_MHADD);
1131 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1132 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1133 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MHADD, mhadd);
1136 /* Now enable all the queues */
1138 for (int i = 0; i < adapter->num_queues; i++) {
1139 txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
1140 txdctl |= IXGBE_TXDCTL_ENABLE;
1141 /* Set WTHRESH to 8, burst writeback */
1142 txdctl |= (8 << 16);
1143 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
1146 for (int i = 0; i < adapter->num_queues; i++) {
1147 rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
1148 /* PTHRESH set to 32 */
1150 rxdctl |= IXGBE_RXDCTL_ENABLE;
1151 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
/* Poll (bounded to 10 reads) for the RXDCTL enable bit to latch */
1152 for (k = 0; k < 10; k++) {
1153 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1154 IXGBE_RXDCTL_ENABLE)
/* Advance the RX tail so the whole ring is available to hardware */
1160 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1163 /* Set up VLAN offloads and filter */
1164 ixgbe_setup_vlan_hw_support(adapter);
1166 /* Enable Receive engine */
1167 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1168 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1169 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1170 rxctrl |= IXGBE_RXCTRL_RXEN;
1171 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
/* Start the once-per-second watchdog/stats timer */
1173 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1175 /* Set up MSI/X routing */
1176 if (ixgbe_enable_msix)
1177 ixgbe_configure_ivars(adapter);
1178 else { /* Simple settings for Legacy/MSI */
1179 ixgbe_set_ivar(adapter, 0, 0, 0);
1180 ixgbe_set_ivar(adapter, 0, 0, 1);
1183 ixgbe_enable_intr(adapter);
1186 ** Check on any SFP devices that
1187 ** need to be kick-started
1189 err = hw->phy.ops.identify(hw);
1190 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1192 "Unsupported SFP+ module type was detected.\n");
/* Kick off module / multispeed-fiber setup tasks as appropriate */
1196 if (ixgbe_is_sfp(hw)) {
1197 if (hw->phy.multispeed_fiber) {
1198 hw->mac.ops.setup_sfp(hw);
1199 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1201 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1203 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1205 /* Now inform the stack we're ready */
1206 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1207 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * Stack-facing init entry point: serialize on the core lock and run
 * the real work in ixgbe_init_locked().
 */
static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return;
}
1226 ** MSIX Interrupt Handlers and Tasklets
1231 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1233 struct ixgbe_hw *hw = &adapter->hw;
1234 u64 queue = (u64)(1 << vector);
1237 if (hw->mac.type == ixgbe_mac_82598EB) {
1238 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1239 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1241 mask = (queue & 0xFFFFFFFF);
1243 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1244 mask = (queue >> 32);
1246 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1251 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1253 struct ixgbe_hw *hw = &adapter->hw;
1254 u64 queue = (u64)(1 << vector);
1257 if (hw->mac.type == ixgbe_mac_82598EB) {
1258 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1259 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1261 mask = (queue & 0xFFFFFFFF);
1263 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1264 mask = (queue >> 32);
1266 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1271 ixgbe_rearm_rx_queues(struct adapter *adapter, u64 queues)
1275 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1276 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
1277 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1279 mask = (queues & 0xFFFFFFFF);
1280 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
1281 mask = (queues >> 32);
1282 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
/*
 * Deferred RX handler run from the per-queue taskqueue: drain
 * completed receives (ixgbe_rxeof with no packet limit), bounded to
 * MAX_LOOP passes, then unmask the queue's interrupt.  The opening
 * "do {" of the loop is on a line missing from this extract.
 */
1287 ixgbe_handle_rx(void *context, int pending)
1289 struct rx_ring *rxr = context;
1290 struct adapter *adapter = rxr->adapter;
1291 u32 loop = MAX_LOOP;
1295 more = ixgbe_rxeof(rxr, -1);
1296 } while (loop-- && more);
1297 /* Reenable this interrupt */
1298 ixgbe_enable_queue(adapter, rxr->msix);
/*
 * Deferred TX handler run from the per-queue taskqueue: reap
 * transmitted descriptors (bounded to MAX_LOOP passes), restart
 * transmission if the interface is running and packets are queued,
 * then unmask the queue's interrupt.  The TX lock acquisition and the
 * opening "do {" are on lines missing from this extract.
 */
1302 ixgbe_handle_tx(void *context, int pending)
1304 struct tx_ring *txr = context;
1305 struct adapter *adapter = txr->adapter;
1306 struct ifnet *ifp = adapter->ifp;
1307 u32 loop = MAX_LOOP;
1312 more = ixgbe_txeof(txr);
1313 } while (loop-- && more);
1315 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* FreeBSD 8+: multiqueue buf_ring path; older: classic if_snd queue */
1316 #if __FreeBSD_version >= 800000
1317 if (!drbr_empty(ifp, txr->br))
1318 ixgbe_mq_start_locked(ifp, txr, NULL);
1320 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1321 ixgbe_start_locked(txr, ifp);
1325 IXGBE_TX_UNLOCK(txr);
1326 /* Reenable this interrupt */
1327 ixgbe_enable_queue(adapter, txr->msix);
1331 /*********************************************************************
1333 * Legacy Interrupt Service routine
1335 **********************************************************************/
/*
 * Single-vector (INTx/MSI) interrupt handler: reads EICR once to
 * learn the causes, services RX/TX on ring 0, and handles the
 * fan-failure and link-status causes inline.  Further deferred work
 * is pushed to the per-ring taskqueues.
 */
1338 ixgbe_legacy_irq(void *arg)
1340 struct adapter *adapter = arg;
1341 struct ixgbe_hw *hw = &adapter->hw;
1342 struct tx_ring *txr = adapter->tx_rings;
1343 struct rx_ring *rxr = adapter->rx_rings;
1345 u32 reg_eicr, loop = MAX_LOOP;
1348 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1350 if (reg_eicr == 0) {
/* Spurious/shared interrupt: nothing for us, just re-enable */
1351 ixgbe_enable_intr(adapter);
1355 if (ixgbe_rxeof(rxr, adapter->rx_process_limit))
1356 taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1361 more = ixgbe_txeof(txr);
1362 } while (loop-- && more);
1363 IXGBE_TX_UNLOCK(txr);
1366 taskqueue_enqueue(txr->tq, &txr->tx_task);
1368 /* Check for fan failure */
1369 if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1370 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1371 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1372 "REPLACE IMMEDIATELY!!\n");
/* Writing EIMS re-enables the SDP1 cause for further notification */
1373 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
1376 /* Link status change */
1377 if (reg_eicr & IXGBE_EICR_LSC) {
1378 ixgbe_check_link(&adapter->hw,
1379 &adapter->link_speed, &adapter->link_up, 0);
1380 ixgbe_update_link_status(adapter);
1383 /* Update interrupt rate */
1384 if (ixgbe_enable_aim == TRUE)
1385 ixgbe_update_aim(rxr);
1387 ixgbe_enable_intr(adapter);
1392 /*********************************************************************
1394 * MSIX TX Interrupt Service routine
1396 **********************************************************************/
/*
 * Per-queue TX interrupt: mask the vector, reap completed transmits,
 * and either defer further work to the taskqueue (when more remains)
 * or unmask the vector again.  The TX lock acquisition and the
 * "if (more)" test are on lines missing from this extract.
 */
1398 ixgbe_msix_tx(void *arg)
1400 struct tx_ring *txr = arg;
1401 struct adapter *adapter = txr->adapter;
/* Mask this vector while servicing */
1404 ixgbe_disable_queue(adapter, txr->msix);
1408 more = ixgbe_txeof(txr);
1409 IXGBE_TX_UNLOCK(txr);
1411 taskqueue_enqueue(txr->tq, &txr->tx_task);
1412 else /* Reenable this interrupt */
1413 ixgbe_enable_queue(adapter, txr->msix);
1418 /*********************************************************************
1420 * MSIX RX Interrupt Service routine
1422 **********************************************************************/
/*
 * Per-queue RX interrupt: mask the vector, drain up to
 * rx_process_limit packets, optionally retune EITR (adaptive
 * moderation), then either defer to the taskqueue or unmask the
 * vector again.
 */
1424 ixgbe_msix_rx(void *arg)
1426 struct rx_ring *rxr = arg;
1427 struct adapter *adapter = rxr->adapter;
/* Mask this vector while servicing */
1430 ixgbe_disable_queue(adapter, rxr->msix);
1433 more = ixgbe_rxeof(rxr, adapter->rx_process_limit);
1435 /* Update interrupt rate */
1436 if (ixgbe_enable_aim == TRUE)
1437 ixgbe_update_aim(rxr);
1440 taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1442 ixgbe_enable_queue(adapter, rxr->msix);
/*
 * MSIX link/"other" vector handler: decodes the non-queue causes
 * (link state change, ECC error, SFP module pins, fan failure) and
 * defers the heavy lifting to the adapter taskqueue.
 */
1448 ixgbe_msix_link(void *arg)
1450 struct adapter *adapter = arg;
1451 struct ixgbe_hw *hw = &adapter->hw;
1454 ++adapter->link_irq;
1456 /* First get the cause */
/* NOTE(review): reads EICS (cause *set* register) rather than EICR -- confirm this is intentional */
1457 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1458 /* Clear interrupt with write */
1459 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1461 /* Link status change */
1462 if (reg_eicr & IXGBE_EICR_LSC)
1463 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1465 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1466 if (reg_eicr & IXGBE_EICR_ECC) {
1467 device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1468 "Please Reboot!!\n");
1469 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1470 } else if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
1471 /* Clear the interrupt */
1472 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
/* SDP1 on 82599: queue multispeed-fiber setup */
1473 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1474 } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
1475 /* Clear the interrupt */
1476 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
/* SDP2 on 82599: queue SFP module insert/remove handling */
1477 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1481 /* Check for fan failure */
1482 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1483 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1484 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1485 "REPLACE IMMEDIATELY!!\n");
1486 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
/* Re-enable the "other" cause vector before returning */
1489 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1494 ** Routine to adjust the RX EITR value based on traffic;
1495 ** it's a simple three state model, but seems to help.
1497 ** Note that the three EITR values are tuneable using
1498 ** sysctl in real time. The feature can be effectively
1499 ** nullified by setting them equal.
1501 #define BULK_THRESHOLD 10000
1502 #define AVE_THRESHOLD 1600
/*
 * Adaptive interrupt moderation: move the ring between the
 * low/ave/bulk EITR settings based on bytes seen since the last tick.
 * The newitr declaration and the byte-counter reset are on lines
 * missing from this extract.
 */
1505 ixgbe_update_aim(struct rx_ring *rxr)
1507 struct adapter *adapter = rxr->adapter;
1510 /* Update interrupt moderation based on traffic */
1511 olditr = rxr->eitr_setting;
1514 /* Idle, don't change setting */
1515 if (rxr->bytes == 0)
/* State transitions: low <-> ave <-> bulk, driven by byte thresholds */
1518 if (olditr == ixgbe_low_latency) {
1519 if (rxr->bytes > AVE_THRESHOLD)
1520 newitr = ixgbe_ave_latency;
1521 } else if (olditr == ixgbe_ave_latency) {
1522 if (rxr->bytes < AVE_THRESHOLD)
1523 newitr = ixgbe_low_latency;
1524 else if (rxr->bytes > BULK_THRESHOLD)
1525 newitr = ixgbe_bulk_latency;
1526 } else if (olditr == ixgbe_bulk_latency) {
1527 if (rxr->bytes < BULK_THRESHOLD)
1528 newitr = ixgbe_ave_latency;
1531 if (olditr != newitr) {
1532 /* Change interrupt rate */
1533 rxr->eitr_setting = newitr;
/* Value written into both 16-bit halves of the EITR register */
1534 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rxr->me),
1535 newitr | (newitr << 16));
/*
 * Program initial interrupt-moderation (EITR) values: a single fixed
 * rate for Legacy/MSI, or per-vector TX (fixed) and RX (adaptive
 * default) rates plus the link-vector rate for MSIX.
 */
1543 ixgbe_init_moderation(struct adapter *adapter)
1545 struct rx_ring *rxr = adapter->rx_rings;
1546 struct tx_ring *txr = adapter->tx_rings;
1548 /* Single interrupt - MSI or Legacy? */
1549 if (adapter->msix < 2) {
1550 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(0), 100);
1554 /* TX irq moderation rate is fixed */
1555 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1556 IXGBE_WRITE_REG(&adapter->hw,
1557 IXGBE_EITR(txr->msix), ixgbe_ave_latency);
1558 txr->watchdog_timer = FALSE;
1561 /* RX moderation will be adapted over time, set default */
1562 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1563 IXGBE_WRITE_REG(&adapter->hw,
1564 IXGBE_EITR(rxr->msix), ixgbe_low_latency);
1567 /* Set Link moderation */
1568 IXGBE_WRITE_REG(&adapter->hw,
1569 IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
1573 /*********************************************************************
1575 * Media Ioctl callback
1577 * This routine is called whenever the user queries the status of
1578 * the interface using ifconfig.
1580 **********************************************************************/
1582 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1584 struct adapter *adapter = ifp->if_softc;
1586 INIT_DEBUGOUT("ixgbe_media_status: begin");
/* Refresh link state under the core lock before reporting */
1587 IXGBE_CORE_LOCK(adapter);
1588 ixgbe_update_link_status(adapter);
1590 ifmr->ifm_status = IFM_AVALID;
1591 ifmr->ifm_active = IFM_ETHER;
/* No link: leave IFM_ACTIVE clear and return early */
1593 if (!adapter->link_active) {
1594 IXGBE_CORE_UNLOCK(adapter);
1598 ifmr->ifm_status |= IFM_ACTIVE;
1600 switch (adapter->link_speed) {
1601 case IXGBE_LINK_SPEED_1GB_FULL:
1602 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1604 case IXGBE_LINK_SPEED_10GB_FULL:
/* 10G media subtype depends on the detected optics */
1605 ifmr->ifm_active |= adapter->optics | IFM_FDX;
1609 IXGBE_CORE_UNLOCK(adapter);
1614 /*********************************************************************
1616 * Media Ioctl callback
1618 * This routine is called when the user changes speed/duplex using
1619 * media/mediaopt option with ifconfig.
1621 **********************************************************************/
1623 ixgbe_media_change(struct ifnet * ifp)
1625 struct adapter *adapter = ifp->if_softc;
1626 struct ifmedia *ifm = &adapter->media;
1628 INIT_DEBUGOUT("ixgbe_media_change: begin");
1630 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Only autoselect is supported; it advertises both 1G and 10G */
1633 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1635 adapter->hw.mac.autoneg = TRUE;
1636 adapter->hw.phy.autoneg_advertised =
1637 IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
1640 device_printf(adapter->dev, "Only auto media type\n");
1647 /*********************************************************************
1649 * This routine maps the mbufs to tx descriptors.
1650 * WARNING: while this code is using an MQ style infrastructure,
1651 * it would NOT work as is with more than 1 queue.
1653 * return 0 on success, positive on failure
1654 **********************************************************************/
/*
 * Encapsulate one mbuf chain into advanced TX descriptors on txr.
 * On EFBIG the chain is defragmented once and the mapping retried.
 * Caller holds the TX lock; *m_headp may be replaced (defrag) or
 * freed on the error paths (on lines missing from this extract).
 */
1657 ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1659 struct adapter *adapter = txr->adapter;
1660 u32 olinfo_status = 0, cmd_type_len;
1662 int i, j, error, nsegs;
1663 int first, last = 0;
1664 struct mbuf *m_head;
1665 bus_dma_segment_t segs[ixgbe_num_segs];
1667 struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
1668 union ixgbe_adv_tx_desc *txd = NULL;
1672 /* Basic descriptor defines */
1673 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1674 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1676 if (m_head->m_flags & M_VLANTAG)
1677 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1679 /* Do a clean if descriptors are low */
1680 if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
1682 /* Now do we at least have a minimal? */
1683 if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD)
1688 * Important to capture the first descriptor
1689 * used because it will contain the index of
1690 * the one we tell the hardware to report back
1692 first = txr->next_avail_tx_desc;
1693 txbuf = &txr->tx_buffers[first];
1694 txbuf_mapped = txbuf;
1698 * Map the packet for DMA.
1700 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1701 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
/* Too many segments: defragment the chain once and retry the mapping */
1703 if (error == EFBIG) {
1706 m = m_defrag(*m_headp, M_DONTWAIT);
1708 adapter->mbuf_defrag_failed++;
1716 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1717 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1719 if (error == ENOMEM) {
1720 adapter->no_tx_dma_setup++;
1722 } else if (error != 0) {
1723 adapter->no_tx_dma_setup++;
1728 } else if (error == ENOMEM) {
1729 adapter->no_tx_dma_setup++;
1731 } else if (error != 0) {
1732 adapter->no_tx_dma_setup++;
1738 /* Make certain there are enough descriptors */
1739 if (nsegs > txr->tx_avail - 2) {
1740 txr->no_tx_desc_avail++;
1747 ** Set up the appropriate offload context
1748 ** this becomes the first descriptor of
1751 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1752 if (ixgbe_tso_setup(txr, m_head, &paylen)) {
1753 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1754 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1755 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1756 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1760 } else if (ixgbe_tx_ctx_setup(txr, m_head))
1761 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1763 #ifdef IXGBE_IEEE1588
1764 /* This is changing soon to an mtag detection */
1765 if (we detect this mbuf has a TSTAMP mtag)
1766 cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
1769 /* Record payload length */
1771 olinfo_status |= m_head->m_pkthdr.len <<
1772 IXGBE_ADVTXD_PAYLEN_SHIFT;
/* Walk the DMA segments, filling one descriptor per segment */
1774 i = txr->next_avail_tx_desc;
1775 for (j = 0; j < nsegs; j++) {
1779 txbuf = &txr->tx_buffers[i];
1780 txd = &txr->tx_base[i];
1781 seglen = segs[j].ds_len;
1782 segaddr = htole64(segs[j].ds_addr);
1784 txd->read.buffer_addr = segaddr;
1785 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1786 cmd_type_len |seglen);
1787 txd->read.olinfo_status = htole32(olinfo_status);
1788 last = i; /* Next descriptor that will get completed */
/* Ring index wraps at num_tx_desc */
1790 if (++i == adapter->num_tx_desc)
1793 txbuf->m_head = NULL;
1794 txbuf->eop_index = -1;
/* Mark the final descriptor: end-of-packet plus report-status */
1797 txd->read.cmd_type_len |=
1798 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1799 txr->tx_avail -= nsegs;
1800 txr->next_avail_tx_desc = i;
1802 txbuf->m_head = m_head;
1804 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1806 /* Set the index of the descriptor that will be marked done */
1807 txbuf = &txr->tx_buffers[first];
1808 txbuf->eop_index = last;
1810 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1811 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1813 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1814 * hardware that this frame is available to transmit.
1816 ++txr->total_packets;
1817 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
/* Error path: unload the DMA map before returning (label on a missing line) */
1821 bus_dmamap_unload(txr->txtag, txbuf->map);
1827 ixgbe_set_promisc(struct adapter *adapter)
1831 struct ifnet *ifp = adapter->ifp;
1833 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1835 if (ifp->if_flags & IFF_PROMISC) {
1836 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1837 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1838 } else if (ifp->if_flags & IFF_ALLMULTI) {
1839 reg_rctl |= IXGBE_FCTRL_MPE;
1840 reg_rctl &= ~IXGBE_FCTRL_UPE;
1841 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1847 ixgbe_disable_promisc(struct adapter * adapter)
1851 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1853 reg_rctl &= (~IXGBE_FCTRL_UPE);
1854 reg_rctl &= (~IXGBE_FCTRL_MPE);
1855 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1861 /*********************************************************************
1864 * This routine is called whenever multicast address list is updated.
1866 **********************************************************************/
1867 #define IXGBE_RAR_ENTRIES 16
/*
 * Rebuild the multicast filter: program the FCTRL promiscuous bits
 * from the interface flags, then copy every AF_LINK multicast address
 * into the flat mta[] array and hand it to the shared code via the
 * ixgbe_mc_array_itr iterator.
 */
1870 ixgbe_set_multi(struct adapter *adapter)
1873 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1875 struct ifmultiaddr *ifma;
1877 struct ifnet *ifp = adapter->ifp;
1879 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1881 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
/* NOTE(review): this unconditional set is always overridden by the branches below -- appears redundant */
1882 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1883 if (ifp->if_flags & IFF_PROMISC)
1884 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1885 else if (ifp->if_flags & IFF_ALLMULTI) {
1886 fctrl |= IXGBE_FCTRL_MPE;
1887 fctrl &= ~IXGBE_FCTRL_UPE;
1889 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1891 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Lock the multicast list while walking it (API differs pre/post FreeBSD 8) */
1893 #if __FreeBSD_version < 800000
1896 if_maddr_rlock(ifp);
1898 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1899 if (ifma->ifma_addr->sa_family != AF_LINK)
1901 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1902 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1903 IXGBE_ETH_LENGTH_OF_ADDRESS);
1906 #if __FreeBSD_version < 800000
1907 IF_ADDR_UNLOCK(ifp);
1909 if_maddr_runlock(ifp);
1913 ixgbe_update_mc_addr_list(&adapter->hw,
1914 update_ptr, mcnt, ixgbe_mc_array_itr);
1920 * This is an iterator function now needed by the multicast
1921 * shared code. It simply feeds the shared code routine the
1922 * addresses in the array of ixgbe_set_multi() one by one.
1925 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1927 u8 *addr = *update_ptr;
1931 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1932 *update_ptr = newptr;
1937 /*********************************************************************
1940 * This routine checks for link status, updates statistics,
1941 * and runs the watchdog timer.
1943 **********************************************************************/
/*
 * Once-per-second callout (core lock held): probe pluggable optics,
 * refresh link state and statistics, run the TX watchdog, poke all RX
 * queues, and reschedule itself.
 */
1946 ixgbe_local_timer(void *arg)
1948 struct adapter *adapter = arg;
1949 struct ifnet *ifp = adapter->ifp;
1951 mtx_assert(&adapter->core_mtx, MA_OWNED);
1953 /* Check for pluggable optics */
1954 if (adapter->sfp_probe)
1955 if (!ixgbe_sfp_probe(adapter))
1956 goto out; /* Nothing to do */
1958 ixgbe_update_link_status(adapter);
1959 ixgbe_update_stats_counters(adapter);
1960 if (ixgbe_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1961 ixgbe_print_hw_stats(adapter);
1964 * Each tick we check the watchdog
1965 * to protect against hardware hangs.
1967 ixgbe_watchdog(adapter);
1970 /* Trigger an RX interrupt on all queues */
1971 ixgbe_rearm_rx_queues(adapter, adapter->rx_mask);
1973 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1977 ** Note: this routine updates the OS on the link state;
1978 ** the real check of the hardware only happens with
1979 ** a link interrupt.
/*
 * Propagate adapter->link_up/link_speed (set by the interrupt path)
 * to the ifnet layer, logging transitions and disarming the per-ring
 * TX watchdogs on link loss.
 */
1982 ixgbe_update_link_status(struct adapter *adapter)
1984 struct ifnet *ifp = adapter->ifp;
1985 struct tx_ring *txr = adapter->tx_rings;
1986 device_t dev = adapter->dev;
1989 if (adapter->link_up){
1990 if (adapter->link_active == FALSE) {
/* NOTE(review): 128 presumably encodes IXGBE_LINK_SPEED_10GB_FULL (0x80) -- verify */
1992 device_printf(dev,"Link is up %d Gbps %s \n",
1993 ((adapter->link_speed == 128)? 10:1),
1995 adapter->link_active = TRUE;
1996 if_link_state_change(ifp, LINK_STATE_UP);
1998 } else { /* Link down */
1999 if (adapter->link_active == TRUE) {
2001 device_printf(dev,"Link is Down\n");
2002 if_link_state_change(ifp, LINK_STATE_DOWN);
2003 adapter->link_active = FALSE;
/* Disarm the TX watchdog on every ring while link is down */
2004 for (int i = 0; i < adapter->num_queues;
2006 txr->watchdog_timer = FALSE;
2014 /*********************************************************************
2016 * This routine disables all traffic on the adapter by issuing a
2017 * global reset on the MAC and deallocates TX/RX buffers.
2019 **********************************************************************/
/*
 * Bring the adapter down (core lock held): mask interrupts, mark the
 * ifnet inactive, reset and stop the MAC, cancel the timer, and
 * restore RAR[0].  The ifp local is declared on a line missing from
 * this extract.
 */
2022 ixgbe_stop(void *arg)
2025 struct adapter *adapter = arg;
2028 mtx_assert(&adapter->core_mtx, MA_OWNED);
2030 INIT_DEBUGOUT("ixgbe_stop: begin\n");
2031 ixgbe_disable_intr(adapter);
2033 /* Tell the stack that the interface is no longer active */
2034 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2036 ixgbe_reset_hw(&adapter->hw);
/* Clear the flag so stop_adapter runs even right after the reset */
2037 adapter->hw.adapter_stopped = FALSE;
2038 ixgbe_stop_adapter(&adapter->hw);
2039 callout_stop(&adapter->timer);
2041 /* reprogram the RAR[0] in case user changed it. */
2042 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2048 /*********************************************************************
2050 * Determine hardware revision.
2052 **********************************************************************/
2054 ixgbe_identify_hardware(struct adapter *adapter)
2056 device_t dev = adapter->dev;
2058 /* Save off the information about this board */
2059 adapter->hw.vendor_id = pci_get_vendor(dev);
2060 adapter->hw.device_id = pci_get_device(dev);
2061 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2062 adapter->hw.subsystem_vendor_id =
2063 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2064 adapter->hw.subsystem_device_id =
2065 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2070 /*********************************************************************
2072 * Setup the Legacy or MSI Interrupt handler
2074 **********************************************************************/
/*
 * Allocate the single shared IRQ, create the per-ring and link/SFP
 * taskqueues, and install ixgbe_legacy_irq as the handler.  The rid
 * selection for MSI vs INTx happens on lines missing from this
 * extract.
 */
2076 ixgbe_allocate_legacy(struct adapter *adapter)
2078 device_t dev = adapter->dev;
2079 struct tx_ring *txr = adapter->tx_rings;
2080 struct rx_ring *rxr = adapter->rx_rings;
2084 if (adapter->msix == 1)
2087 /* We allocate a single interrupt resource */
2088 adapter->res = bus_alloc_resource_any(dev,
2089 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2090 if (adapter->res == NULL) {
2091 device_printf(dev, "Unable to allocate bus resource: "
2097 * Try allocating a fast interrupt and the associated deferred
2098 * processing contexts.
2100 TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
2101 TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
2102 txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
2103 taskqueue_thread_enqueue, &txr->tq);
2104 rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
2105 taskqueue_thread_enqueue, &rxr->tq);
2106 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2107 device_get_nameunit(adapter->dev));
2108 taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
2109 device_get_nameunit(adapter->dev));
2111 /* Tasklets for Link, SFP and Multispeed Fiber */
2112 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2113 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2114 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2115 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2116 taskqueue_thread_enqueue, &adapter->tq);
2117 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2118 device_get_nameunit(adapter->dev));
2120 if ((error = bus_setup_intr(dev, adapter->res,
2121 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2122 adapter, &adapter->tag)) != 0) {
2123 device_printf(dev, "Failed to register fast interrupt "
2124 "handler: %d\n", error);
/* On failure, tear the per-ring taskqueues back down */
2125 taskqueue_free(txr->tq);
2126 taskqueue_free(rxr->tq);
2136 /*********************************************************************
2138 * Setup MSIX Interrupt resources and handlers
2140 **********************************************************************/
/*
 * Allocate and wire one IRQ + handler + taskqueue per TX ring and per
 * RX ring, then a final vector for link/other causes.  The rid
 * computations (vector + 1) are on lines missing from this extract.
 */
2142 ixgbe_allocate_msix(struct adapter *adapter)
2144 device_t dev = adapter->dev;
2145 struct tx_ring *txr = adapter->tx_rings;
2146 struct rx_ring *rxr = adapter->rx_rings;
2147 int error, rid, vector = 0;
2149 /* TX setup: the code is here for multi tx,
2150 there are other parts of the driver not ready for it */
2151 for (int i = 0; i < adapter->num_queues; i++, vector++, txr++) {
2153 txr->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2154 RF_SHAREABLE | RF_ACTIVE);
2156 device_printf(dev,"Unable to allocate"
2157 " bus resource: tx interrupt [%d]\n", vector);
2160 /* Set the handler function */
2161 error = bus_setup_intr(dev, txr->res,
2162 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2163 ixgbe_msix_tx, txr, &txr->tag);
2166 device_printf(dev, "Failed to register TX handler");
2171 ** Bind the msix vector, and thus the
2172 ** ring to the corresponding cpu.
2174 if (adapter->num_queues > 1)
2175 bus_bind_intr(dev, txr->res, i);
2177 TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
2178 txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
2179 taskqueue_thread_enqueue, &txr->tq);
2180 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2181 device_get_nameunit(adapter->dev));
/* RX setup mirrors the TX loop above */
2185 for (int i = 0; i < adapter->num_queues; i++, vector++, rxr++) {
2187 rxr->res = bus_alloc_resource_any(dev,
2188 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2190 device_printf(dev,"Unable to allocate"
2191 " bus resource: rx interrupt [%d],"
2192 "rid = %d\n", i, rid);
2195 /* Set the handler function */
2196 error = bus_setup_intr(dev, rxr->res,
2197 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2198 ixgbe_msix_rx, rxr, &rxr->tag);
2201 device_printf(dev, "Failed to register RX handler");
2205 /* used in local timer */
/* NOTE(review): 1 << vector is a 32-bit shift cast afterwards; UB/truncation for vector >= 31 -- should be (u64)1 << vector */
2206 adapter->rx_mask |= (u64)(1 << vector);
2208 ** Bind the msix vector, and thus the
2209 ** ring to the corresponding cpu.
2211 if (adapter->num_queues > 1)
2212 bus_bind_intr(dev, rxr->res, i);
2214 TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
2215 rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
2216 taskqueue_thread_enqueue, &rxr->tq);
2217 taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
2218 device_get_nameunit(adapter->dev));
2221 /* Now for Link changes */
2223 adapter->res = bus_alloc_resource_any(dev,
2224 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2225 if (!adapter->res) {
2226 device_printf(dev,"Unable to allocate"
2227 " bus resource: Link interrupt [%d]\n", rid);
2230 /* Set the link handler function */
2231 error = bus_setup_intr(dev, adapter->res,
2232 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2233 ixgbe_msix_link, adapter, &adapter->tag);
2235 adapter->res = NULL;
2236 device_printf(dev, "Failed to register LINK handler");
2239 adapter->linkvec = vector;
2240 /* Tasklets for Link, SFP and Multispeed Fiber */
2241 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2242 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2243 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2244 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2245 taskqueue_thread_enqueue, &adapter->tq);
2246 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2247 device_get_nameunit(adapter->dev));
2253 * Setup Either MSI/X or MSI
/*
 * Probe for MSIX (falling back to MSI): map the MSIX table BAR, size
 * the vector pool, and request two vectors per queue plus one for
 * link.  The return statements are on lines missing from this
 * extract; callers treat the result as the number of vectors.
 */
2256 ixgbe_setup_msix(struct adapter *adapter)
2258 device_t dev = adapter->dev;
2259 int rid, want, queues, msgs;
2261 /* Override by tuneable */
2262 if (ixgbe_enable_msix == 0)
2265 /* First try MSI/X */
2266 rid = PCIR_BAR(MSIX_82598_BAR);
2267 adapter->msix_mem = bus_alloc_resource_any(dev,
2268 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2269 if (!adapter->msix_mem) {
2270 rid += 4; /* 82599 maps in higher BAR */
2271 adapter->msix_mem = bus_alloc_resource_any(dev,
2272 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2274 if (!adapter->msix_mem) {
2275 /* May not be enabled */
2276 device_printf(adapter->dev,
2277 "Unable to map MSIX table \n");
2281 msgs = pci_msix_count(dev);
2282 if (msgs == 0) { /* system has msix disabled */
2283 bus_release_resource(dev, SYS_RES_MEMORY,
2284 rid, adapter->msix_mem);
2285 adapter->msix_mem = NULL;
2289 /* Figure out a reasonable auto config value */
2290 queues = (mp_ncpus > ((msgs-1)/2)) ? (msgs-1)/2 : mp_ncpus;
2292 if (ixgbe_num_queues == 0)
2293 ixgbe_num_queues = queues;
2295 ** Want two vectors (RX/TX) per queue
2296 ** plus an additional for Link.
2298 want = (ixgbe_num_queues * 2) + 1;
2302 device_printf(adapter->dev,
2303 "MSIX Configuration Problem, "
2304 "%d vectors but %d queues wanted!\n",
2306 return (0); /* Will go to Legacy setup */
2308 if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
2309 device_printf(adapter->dev,
2310 "Using MSIX interrupts with %d vectors\n", msgs);
2311 adapter->num_queues = ixgbe_num_queues;
/* MSIX failed or insufficient: fall back to plain MSI */
2315 msgs = pci_msi_count(dev);
2316 if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
2317 device_printf(adapter->dev,"Using MSI interrupt\n");
/*
 * Map the BAR(0) register space and set up interrupt vectors.  Legacy
 * defaults (one queue) are set before ixgbe_setup_msix() possibly
 * raises them.  The BAR rid setup is on a line missing from this
 * extract.
 */
2323 ixgbe_allocate_pci_resources(struct adapter *adapter)
2326 device_t dev = adapter->dev;
2329 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2332 if (!(adapter->pci_mem)) {
2333 device_printf(dev,"Unable to allocate bus resource: memory\n");
2337 adapter->osdep.mem_bus_space_tag =
2338 rman_get_bustag(adapter->pci_mem);
2339 adapter->osdep.mem_bus_space_handle =
2340 rman_get_bushandle(adapter->pci_mem);
2341 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2343 /* Legacy defaults */
2344 adapter->num_queues = 1;
2345 adapter->hw.back = &adapter->osdep;
2348 ** Now setup MSI or MSI/X, should
2349 ** return us the number of supported
2350 ** vectors. (Will be 1 for MSI)
2352 adapter->msix = ixgbe_setup_msix(adapter);
/*
 * Undo ixgbe_allocate_pci_resources()/allocate_{legacy,msix}(): tear
 * down every interrupt handler and release IRQ, MSIX-table and BAR(0)
 * resources.  Safe to call after a partial attach thanks to the
 * adapter->res == NULL early-out below.
 */
2357 ixgbe_free_pci_resources(struct adapter * adapter)
2359 struct tx_ring *txr = adapter->tx_rings;
2360 struct rx_ring *rxr = adapter->rx_rings;
2361 device_t dev = adapter->dev;
/* Pick the MSIX table BAR rid by MAC generation */
2364 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2365 memrid = PCIR_BAR(MSIX_82598_BAR);
2367 memrid = PCIR_BAR(MSIX_82599_BAR);
2370 ** There is a slight possibility of a failure mode
2371 ** in attach that will result in entering this function
2372 ** before interrupt resources have been initialized, and
2373 ** in that case we do not want to execute the loops below
2374 ** We can detect this reliably by the state of the adapter
2377 if (adapter->res == NULL)
2381 ** Release all the interrupt resources:
2382 ** notice this is harmless for Legacy or
2383 ** MSI since pointers will always be NULL
2385 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2386 rid = txr->msix + 1;
2387 if (txr->tag != NULL) {
2388 bus_teardown_intr(dev, txr->res, txr->tag);
2391 if (txr->res != NULL)
2392 bus_release_resource(dev, SYS_RES_IRQ, rid, txr->res);
2395 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2396 rid = rxr->msix + 1;
2397 if (rxr->tag != NULL) {
2398 bus_teardown_intr(dev, rxr->res, rxr->tag);
2401 if (rxr->res != NULL)
2402 bus_release_resource(dev, SYS_RES_IRQ, rid, rxr->res);
2405 /* Clean the Legacy or Link interrupt last */
2406 if (adapter->linkvec) /* we are doing MSIX */
2407 rid = adapter->linkvec + 1;
2409 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2411 if (adapter->tag != NULL) {
2412 bus_teardown_intr(dev, adapter->res, adapter->tag);
2413 adapter->tag = NULL;
2415 if (adapter->res != NULL)
2416 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2419 pci_release_msi(dev);
2421 if (adapter->msix_mem != NULL)
2422 bus_release_resource(dev, SYS_RES_MEMORY,
2423 memrid, adapter->msix_mem);
2425 if (adapter->pci_mem != NULL)
2426 bus_release_resource(dev, SYS_RES_MEMORY,
2427 PCIR_BAR(0), adapter->pci_mem);
2432 /*********************************************************************
2434 * Initialize the hardware to a configuration as specified by the
2435 * adapter structure. The controller is reset, the EEPROM is
2436 * verified, the MAC address is set, then the shared initialization
2437 * routines are called.
2439 **********************************************************************/
2441 ixgbe_hardware_init(struct adapter *adapter)
2443 device_t dev = adapter->dev;
2448 /* Issue a global reset */
2449 adapter->hw.adapter_stopped = FALSE;
2450 ixgbe_stop_adapter(&adapter->hw);
2452 /* Make sure we have a good EEPROM before we read from it */
2453 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
2454 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
2458 /* Get Hardware Flow Control setting */
/* Defaults: full (tx+rx) flow control with driver-chosen watermarks */
2459 adapter->hw.fc.requested_mode = ixgbe_fc_full;
2460 adapter->hw.fc.pause_time = IXGBE_FC_PAUSE;
2461 adapter->hw.fc.low_water = IXGBE_FC_LO;
2462 adapter->hw.fc.high_water = IXGBE_FC_HI;
2463 adapter->hw.fc.send_xon = TRUE;
/* Shared-code init: resets MAC, programs MAC address, etc. */
2465 ret = ixgbe_init_hw(&adapter->hw);
2466 if (ret == IXGBE_ERR_EEPROM_VERSION) {
2467 device_printf(dev, "This device is a pre-production adapter/"
2468 "LOM. Please be aware there may be issues associated "
2469 "with your hardware.\n If you are experiencing problems "
2470 "please contact your Intel or hardware representative "
2471 "who provided you with this hardware.\n");
2472 } else if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2473 device_printf(dev,"Unsupported SFP+ Module\n");
2475 } else if (ret != 0 ) {
2476 device_printf(dev,"Hardware Initialization Failure\n");
2483 /*********************************************************************
2485 * Setup networking device structure and register an interface.
2487 **********************************************************************/
2489 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2492 struct ixgbe_hw *hw = &adapter->hw;
2493 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
/* Allocate and initialize the ifnet for this adapter */
2495 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2497 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
2498 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2499 ifp->if_mtu = ETHERMTU;
2500 ifp->if_baudrate = 1000000000;
2501 ifp->if_init = ixgbe_init;
2502 ifp->if_softc = adapter;
2503 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2504 ifp->if_ioctl = ixgbe_ioctl;
2505 ifp->if_start = ixgbe_start;
/* Multiqueue transmit entry points exist from FreeBSD 8 on */
2506 #if __FreeBSD_version >= 800000
2507 ifp->if_transmit = ixgbe_mq_start;
2508 ifp->if_qflush = ixgbe_qflush;
/* Per-ring watchdog is used instead of the ifnet one */
2511 ifp->if_watchdog = NULL;
2512 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
2514 ether_ifattach(ifp, adapter->hw.mac.addr);
2516 adapter->max_frame_size =
2517 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2520 * Tell the upper layer(s) we support long frames.
2522 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2524 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
2525 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2526 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;
2528 ifp->if_capenable = ifp->if_capabilities;
/* 82598AT has a copper PHY: advertise both 10G and 1G */
2530 if (hw->device_id == IXGBE_DEV_ID_82598AT)
2531 ixgbe_setup_link_speed(hw, (IXGBE_LINK_SPEED_10GB_FULL |
2532 IXGBE_LINK_SPEED_1GB_FULL), TRUE, TRUE);
2534 ixgbe_setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL,
2538 * Specify the media types supported by this adapter and register
2539 * callbacks to update media and link information
2541 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2542 ixgbe_media_status);
2543 ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics |
2545 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2546 ifmedia_add(&adapter->media,
2547 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2548 ifmedia_add(&adapter->media,
2549 IFM_ETHER | IFM_1000_T, 0, NULL);
2551 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2552 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2557 /********************************************************************
2558 * Manage DMA'able memory.
2559 *******************************************************************/
/*
 * busdma load callback: stores the bus address of the first (and,
 * for these single-segment loads, only) DMA segment into the
 * caller-supplied bus_addr_t.
 */
2561 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
2565 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * Allocate and map a contiguous DMA-able region of 'size' bytes:
 * create a tag, allocate the memory, and load the map so the bus
 * address ends up in the dma struct (via ixgbe_dmamap_cb).
 * On failure, unwinds whatever was created and NULLs the handles.
 */
2570 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2571 struct ixgbe_dma_alloc *dma, int mapflags)
2573 device_t dev = adapter->dev;
2576 r = bus_dma_tag_create(NULL, /* parent */
2577 1, 0, /* alignment, bounds */
2578 BUS_SPACE_MAXADDR, /* lowaddr */
2579 BUS_SPACE_MAXADDR, /* highaddr */
2580 NULL, NULL, /* filter, filterarg */
2583 size, /* maxsegsize */
2584 BUS_DMA_ALLOCNOW, /* flags */
2585 NULL, /* lockfunc */
2586 NULL, /* lockfuncarg */
2589 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2593 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2594 BUS_DMA_NOWAIT, &dma->dma_map);
2596 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2600 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2604 mapflags | BUS_DMA_NOWAIT);
2606 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2610 dma->dma_size = size;
/* Error unwind: free memory then destroy the tag */
2613 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2615 bus_dma_tag_destroy(dma->dma_tag);
2617 dma->dma_map = NULL;
2618 dma->dma_tag = NULL;
/*
 * Release a region allocated by ixgbe_dma_malloc: sync for CPU,
 * unload the map, free the memory, and destroy the tag.
 */
2623 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2625 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2626 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2627 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2628 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2629 bus_dma_tag_destroy(dma->dma_tag);
2633 /*********************************************************************
2635 * Allocate memory for the transmit and receive rings, and then
2636 * the descriptors associated with each, called only once at attach.
2638 **********************************************************************/
2640 ixgbe_allocate_queues(struct adapter *adapter)
2642 device_t dev = adapter->dev;
2643 struct tx_ring *txr;
2644 struct rx_ring *rxr;
2645 int rsize, tsize, error = IXGBE_SUCCESS;
/* txconf/rxconf count how many rings were set up, for unwind */
2646 int txconf = 0, rxconf = 0;
2648 /* First allocate the TX ring struct memory */
2649 if (!(adapter->tx_rings =
2650 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2651 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2652 device_printf(dev, "Unable to allocate TX ring memory\n");
2656 txr = adapter->tx_rings;
2658 /* Next allocate the RX */
2659 if (!(adapter->rx_rings =
2660 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2661 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2662 device_printf(dev, "Unable to allocate RX ring memory\n");
2666 rxr = adapter->rx_rings;
2668 /* For the ring itself */
/* Descriptor rings must be 4K-aligned in size */
2669 tsize = roundup2(adapter->num_tx_desc *
2670 sizeof(union ixgbe_adv_tx_desc), 4096);
2673 * Now set up the TX queues, txconf is needed to handle the
2674 * possibility that things fail midcourse and we need to
2675 * undo memory gracefully
2677 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2678 /* Set up some basics */
2679 txr = &adapter->tx_rings[i];
2680 txr->adapter = adapter;
2683 /* Initialize the TX side lock */
2684 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2685 device_get_nameunit(dev), txr->me);
2686 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2688 if (ixgbe_dma_malloc(adapter, tsize,
2689 &txr->txdma, BUS_DMA_NOWAIT)) {
2691 "Unable to allocate TX Descriptor memory\n");
2695 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2696 bzero((void *)txr->tx_base, tsize);
2698 /* Now allocate transmit buffers for the ring */
2699 if (ixgbe_allocate_transmit_buffers(txr)) {
2701 "Critical Failure setting up transmit buffers\n");
2705 #if __FreeBSD_version >= 800000
2706 /* Allocate a buf ring */
2707 txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2708 M_WAITOK, &txr->tx_mtx);
2713 * Next the RX queues...
2715 rsize = roundup2(adapter->num_rx_desc *
2716 sizeof(union ixgbe_adv_rx_desc), 4096);
2717 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2718 rxr = &adapter->rx_rings[i];
2719 /* Set up some basics */
2720 rxr->adapter = adapter;
2723 /* Initialize the RX side lock */
2724 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2725 device_get_nameunit(dev), rxr->me);
2726 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2728 if (ixgbe_dma_malloc(adapter, rsize,
2729 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2731 "Unable to allocate RxDescriptor memory\n");
2735 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2736 bzero((void *)rxr->rx_base, rsize);
2738 /* Allocate receive buffers for the ring*/
2739 if (ixgbe_allocate_receive_buffers(rxr)) {
2741 "Critical Failure setting up receive buffers\n");
/* Error unwind: free only the rings that were configured */
2750 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2751 ixgbe_dma_free(adapter, &rxr->rxdma);
2753 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2754 ixgbe_dma_free(adapter, &txr->txdma);
2755 free(adapter->rx_rings, M_DEVBUF);
2757 free(adapter->tx_rings, M_DEVBUF);
2762 /*********************************************************************
2764 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2765 * the information needed to transmit a packet on the wire. This is
2766 * called only once at attach, setup is done every reset.
2768 **********************************************************************/
2770 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2772 struct adapter *adapter = txr->adapter;
2773 device_t dev = adapter->dev;
2774 struct ixgbe_tx_buf *txbuf;
2778 * Setup DMA descriptor areas.
/* Tag sized for TSO-length frames split across ixgbe_num_segs segments */
2780 if ((error = bus_dma_tag_create(NULL, /* parent */
2781 1, 0, /* alignment, bounds */
2782 BUS_SPACE_MAXADDR, /* lowaddr */
2783 BUS_SPACE_MAXADDR, /* highaddr */
2784 NULL, NULL, /* filter, filterarg */
2785 IXGBE_TSO_SIZE, /* maxsize */
2786 ixgbe_num_segs, /* nsegments */
2787 PAGE_SIZE, /* maxsegsize */
2789 NULL, /* lockfunc */
2790 NULL, /* lockfuncarg */
2792 device_printf(dev,"Unable to allocate TX DMA tag\n");
2796 if (!(txr->tx_buffers =
2797 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
2798 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2799 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2804 /* Create the descriptor buffer dma maps */
2805 txbuf = txr->tx_buffers;
2806 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2807 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2809 device_printf(dev, "Unable to create TX DMA map\n");
2816 /* We free all, it handles case where we are in the middle */
2817 ixgbe_free_transmit_structures(adapter);
2821 /*********************************************************************
2823 * Initialize a transmit ring.
2825 **********************************************************************/
2827 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2829 struct adapter *adapter = txr->adapter;
2830 struct ixgbe_tx_buf *txbuf;
2833 /* Clear the old ring contents */
2834 bzero((void *)txr->tx_base,
2835 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
/* Reset producer/consumer indices */
2837 txr->next_avail_tx_desc = 0;
2838 txr->next_tx_to_clean = 0;
2840 /* Free any existing tx buffers. */
2841 txbuf = txr->tx_buffers;
2842 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2843 if (txbuf->m_head != NULL) {
2844 bus_dmamap_sync(txr->txtag, txbuf->map,
2845 BUS_DMASYNC_POSTWRITE);
2846 bus_dmamap_unload(txr->txtag, txbuf->map);
2847 m_freem(txbuf->m_head);
2848 txbuf->m_head = NULL;
2850 /* Clear the EOP index */
2851 txbuf->eop_index = -1;
2854 /* Set number of descriptors available */
2855 txr->tx_avail = adapter->num_tx_desc;
/* Hand the (now empty) ring back to the hardware */
2857 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2858 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2861 /*********************************************************************
2863 * Initialize all transmit rings.
2865 **********************************************************************/
/* Initialize every TX ring in turn (see ixgbe_setup_transmit_ring) */
2867 ixgbe_setup_transmit_structures(struct adapter *adapter)
2869 struct tx_ring *txr = adapter->tx_rings;
2871 for (int i = 0; i < adapter->num_queues; i++, txr++)
2872 ixgbe_setup_transmit_ring(txr);
2877 /*********************************************************************
2879 * Enable transmit unit.
2881 **********************************************************************/
2883 ixgbe_initialize_transmit_units(struct adapter *adapter)
2885 struct tx_ring *txr = adapter->tx_rings;
2886 struct ixgbe_hw *hw = &adapter->hw;
2888 /* Setup the Base and Length of the Tx Descriptor Ring */
2890 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2891 u64 tdba = txr->txdma.dma_paddr;
/* Program the 64-bit ring base as low/high 32-bit halves */
2893 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2894 (tdba & 0x00000000ffffffffULL));
2895 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2896 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2897 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2899 /* Setup the HW Tx Head and Tail descriptor pointers */
2900 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2901 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2903 /* Setup Transmit Descriptor Cmd Settings */
2904 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2906 txr->watchdog_timer = 0;
/* 82599 requires an explicit DMA TX enable */
2909 if (hw->mac.type == ixgbe_mac_82599EB) {
2911 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2912 dmatxctl |= IXGBE_DMATXCTL_TE;
2913 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2919 /*********************************************************************
2921 * Free all transmit rings.
2923 **********************************************************************/
/* Free the buffers and descriptor DMA memory of every TX ring,
 * destroy the per-ring locks, then free the ring array itself. */
2925 ixgbe_free_transmit_structures(struct adapter *adapter)
2927 struct tx_ring *txr = adapter->tx_rings;
2929 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2931 ixgbe_free_transmit_buffers(txr);
2932 ixgbe_dma_free(adapter, &txr->txdma);
2933 IXGBE_TX_UNLOCK(txr);
2934 IXGBE_TX_LOCK_DESTROY(txr);
2936 free(adapter->tx_rings, M_DEVBUF);
2939 /*********************************************************************
2941 * Free transmit ring related data structures.
2943 **********************************************************************/
2945 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2947 struct adapter *adapter = txr->adapter;
2948 struct ixgbe_tx_buf *tx_buffer;
2951 INIT_DEBUGOUT("free_transmit_ring: begin");
/* Nothing to do if the buffer array was never allocated */
2953 if (txr->tx_buffers == NULL)
2956 tx_buffer = txr->tx_buffers;
2957 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2958 if (tx_buffer->m_head != NULL) {
/* Buffer still holds an mbuf: sync, unload, free, destroy map */
2959 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2960 BUS_DMASYNC_POSTWRITE);
2961 bus_dmamap_unload(txr->txtag,
2963 m_freem(tx_buffer->m_head);
2964 tx_buffer->m_head = NULL;
2965 if (tx_buffer->map != NULL) {
2966 bus_dmamap_destroy(txr->txtag,
2968 tx_buffer->map = NULL;
2970 } else if (tx_buffer->map != NULL) {
/* No mbuf, but a map still exists: unload and destroy it */
2971 bus_dmamap_unload(txr->txtag,
2973 bus_dmamap_destroy(txr->txtag,
2975 tx_buffer->map = NULL;
2978 #if __FreeBSD_version >= 800000
2979 if (txr->br != NULL)
2980 buf_ring_free(txr->br, M_DEVBUF);
2982 if (txr->tx_buffers != NULL) {
2983 free(txr->tx_buffers, M_DEVBUF);
2984 txr->tx_buffers = NULL;
2986 if (txr->txtag != NULL) {
2987 bus_dma_tag_destroy(txr->txtag);
2993 /*********************************************************************
2995 * Advanced Context Descriptor setup for VLAN or CSUM
2997 **********************************************************************/
3000 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
3002 struct adapter *adapter = txr->adapter;
3003 struct ixgbe_adv_tx_context_desc *TXD;
3004 struct ixgbe_tx_buf *tx_buffer;
3005 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3006 struct ether_vlan_header *eh;
3008 struct ip6_hdr *ip6;
3009 int ehdrlen, ip_hlen = 0;
3012 bool offload = TRUE;
3013 int ctxd = txr->next_avail_tx_desc;
/* No checksum-offload work requested for this packet */
3017 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
3020 tx_buffer = &txr->tx_buffers[ctxd];
3021 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3024 ** In advanced descriptors the vlan tag must
3025 ** be placed into the descriptor itself.
3027 if (mp->m_flags & M_VLANTAG) {
3028 vtag = htole16(mp->m_pkthdr.ether_vtag);
3029 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3030 } else if (offload == FALSE)
3034 * Determine where frame payload starts.
3035 * Jump over vlan headers if already present,
3036 * helpful for QinQ too.
3038 eh = mtod(mp, struct ether_vlan_header *);
3039 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3040 etype = ntohs(eh->evl_proto);
3041 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3043 etype = ntohs(eh->evl_encap_proto);
3044 ehdrlen = ETHER_HDR_LEN;
3047 /* Set the ether header length */
3048 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
/* IPv4: variable header length from ip_hl */
3052 ip = (struct ip *)(mp->m_data + ehdrlen);
3053 ip_hlen = ip->ip_hl << 2;
3054 if (mp->m_len < ehdrlen + ip_hlen)
3057 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3059 case ETHERTYPE_IPV6:
3060 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
/* IPv6: fixed 40-byte base header (extension headers not parsed) */
3061 ip_hlen = sizeof(struct ip6_hdr);
3062 if (mp->m_len < ehdrlen + ip_hlen)
3064 ipproto = ip6->ip6_nxt;
3065 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
3072 vlan_macip_lens |= ip_hlen;
3073 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3077 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
3078 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3082 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
3083 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
3091 /* Now copy bits into descriptor */
3092 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3093 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3094 TXD->seqnum_seed = htole32(0);
3095 TXD->mss_l4len_idx = htole32(0);
/* The context slot carries no mbuf of its own */
3097 tx_buffer->m_head = NULL;
3098 tx_buffer->eop_index = -1;
3100 /* We've consumed the first desc, adjust counters */
3101 if (++ctxd == adapter->num_tx_desc)
3103 txr->next_avail_tx_desc = ctxd;
3109 /**********************************************************************
3111 * Setup work for hardware segmentation offload (TSO) on
3112 * adapters using advanced tx descriptors
3114 **********************************************************************/
3116 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
3118 struct adapter *adapter = txr->adapter;
3119 struct ixgbe_adv_tx_context_desc *TXD;
3120 struct ixgbe_tx_buf *tx_buffer;
3121 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3122 u32 mss_l4len_idx = 0;
3124 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
3125 struct ether_vlan_header *eh;
3131 * Determine where frame payload starts.
3132 * Jump over vlan headers if already present
3134 eh = mtod(mp, struct ether_vlan_header *);
3135 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
3136 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3138 ehdrlen = ETHER_HDR_LEN;
3140 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3141 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3144 ctxd = txr->next_avail_tx_desc;
3145 tx_buffer = &txr->tx_buffers[ctxd];
3146 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
/* IPv4-only path: TSO on this driver handles TCP over IPv4 */
3148 ip = (struct ip *)(mp->m_data + ehdrlen);
3149 if (ip->ip_p != IPPROTO_TCP)
3150 return FALSE; /* 0 */
3152 ip_hlen = ip->ip_hl << 2;
3153 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
/* Seed the pseudo-header checksum; hardware fills in the rest */
3154 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3155 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3156 tcp_hlen = th->th_off << 2;
3157 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
3159 /* This is used in the transmit desc in encap */
3160 *paylen = mp->m_pkthdr.len - hdrlen;
3162 /* VLAN MACLEN IPLEN */
3163 if (mp->m_flags & M_VLANTAG) {
3164 vtag = htole16(mp->m_pkthdr.ether_vtag);
3165 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3168 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3169 vlan_macip_lens |= ip_hlen;
3170 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3172 /* ADV DTYPE TUCMD */
3173 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3174 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3175 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3176 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
/* MSS and L4 header length for the segmentation engine */
3180 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
3181 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
3182 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3184 TXD->seqnum_seed = htole32(0);
3185 tx_buffer->m_head = NULL;
3186 tx_buffer->eop_index = -1;
/* Context descriptor consumed; advance with wraparound */
3188 if (++ctxd == adapter->num_tx_desc)
3192 txr->next_avail_tx_desc = ctxd;
3197 /**********************************************************************
3199 * Examine each tx_buffer in the used queue. If the hardware is done
3200 * processing the packet then free associated resources. The
3201 * tx_buffer is put back on the free queue.
3203 **********************************************************************/
3205 ixgbe_txeof(struct tx_ring *txr)
3207 struct adapter * adapter = txr->adapter;
3208 struct ifnet *ifp = adapter->ifp;
3209 u32 first, last, done, num_avail;
3211 struct ixgbe_tx_buf *tx_buffer;
3212 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
/* Caller must hold the TX ring lock */
3214 mtx_assert(&txr->tx_mtx, MA_OWNED);
/* Ring completely clean — nothing to reap */
3216 if (txr->tx_avail == adapter->num_tx_desc)
3219 num_avail = txr->tx_avail;
3220 first = txr->next_tx_to_clean;
3222 tx_buffer = &txr->tx_buffers[first];
3223 /* For cleanup we just use legacy struct */
3224 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3225 last = tx_buffer->eop_index;
3229 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3231 ** Get the index of the first descriptor
3232 ** BEYOND the EOP and call that 'done'.
3233 ** I do this so the comparison in the
3234 ** inner while loop below can be simple
3236 if (++last == adapter->num_tx_desc) last = 0;
3239 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3240 BUS_DMASYNC_POSTREAD);
3242 ** Only the EOP descriptor of a packet now has the DD
3243 ** bit set, this is what we look for...
3245 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
3246 /* We clean the range of the packet */
3247 while (first != done) {
3248 tx_desc->upper.data = 0;
3249 tx_desc->lower.data = 0;
3250 tx_desc->buffer_addr = 0;
3251 num_avail++; cleaned++;
3253 if (tx_buffer->m_head) {
3255 bus_dmamap_sync(txr->txtag,
3257 BUS_DMASYNC_POSTWRITE);
3258 bus_dmamap_unload(txr->txtag,
3260 m_freem(tx_buffer->m_head);
3261 tx_buffer->m_head = NULL;
3262 tx_buffer->map = NULL;
3264 tx_buffer->eop_index = -1;
/* Advance to the next descriptor, wrapping at ring end */
3266 if (++first == adapter->num_tx_desc)
3269 tx_buffer = &txr->tx_buffers[first];
3271 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3273 /* See if there is more work now */
3274 last = tx_buffer->eop_index;
3277 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3278 /* Get next done point */
3279 if (++last == adapter->num_tx_desc) last = 0;
3284 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3285 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3287 txr->next_tx_to_clean = first;
3290 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
3291 * it is OK to send packets. If there are no pending descriptors,
3292 * clear the timeout. Otherwise, if some descriptors have been freed,
3293 * restart the timeout.
3295 if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
3296 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3297 /* If all are clean turn off the timer */
3298 if (num_avail == adapter->num_tx_desc) {
3299 txr->watchdog_timer = 0;
3300 txr->tx_avail = num_avail;
3305 /* Some were cleaned, so reset timer */
3307 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
3308 txr->tx_avail = num_avail;
3312 /*********************************************************************
3314 * Get a buffer from system mbuf buffer pool.
3316 **********************************************************************/
3318 ixgbe_get_buf(struct rx_ring *rxr, int i, u8 clean)
3320 struct adapter *adapter = rxr->adapter;
/* Two segments: header buffer + packet buffer (header-split) */
3321 bus_dma_segment_t seg[2];
3322 struct ixgbe_rx_buf *rxbuf;
3323 struct mbuf *mh, *mp;
3329 rxbuf = &rxr->rx_buffers[i];
3331 /* First get our header and payload mbuf */
3332 if (clean & IXGBE_CLEAN_HDR) {
3333 mh = m_gethdr(M_DONTWAIT, MT_DATA);
/* Reuse the existing header mbuf for this slot */
3337 mh = rxr->rx_buffers[i].m_head;
3340 mh->m_flags |= M_PKTHDR;
3342 if (clean & IXGBE_CLEAN_PKT) {
3343 mp = m_getjcl(M_DONTWAIT, MT_DATA,
3344 M_PKTHDR, adapter->rx_mbuf_sz);
3347 mp->m_len = adapter->rx_mbuf_sz;
3348 mp->m_flags &= ~M_PKTHDR;
3349 } else { /* reusing */
3350 mp = rxr->rx_buffers[i].m_pack;
3351 mp->m_len = adapter->rx_mbuf_sz;
3352 mp->m_flags &= ~M_PKTHDR;
3355 ** Need to create a chain for the following
3356 ** dmamap call at this point.
3359 mh->m_pkthdr.len = mh->m_len + mp->m_len;
3361 /* Get the memory mapping */
3362 error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3363 rxr->spare_map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3365 printf("GET BUF: dmamap load failure - %d\n", error);
3370 /* Unload old mapping and update buffer struct */
3371 if (rxbuf->m_head != NULL)
3372 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
/* Swap the freshly-loaded spare map into this slot */
3374 rxbuf->map = rxr->spare_map;
3375 rxr->spare_map = map;
3378 bus_dmamap_sync(rxr->rxtag,
3379 rxbuf->map, BUS_DMASYNC_PREREAD);
3381 /* Update descriptor */
3382 rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3383 rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
3388 ** If we get here, we have an mbuf resource
3389 ** issue, so we discard the incoming packet
3390 ** and attempt to reuse existing mbufs next
3391 ** pass thru the ring, but to do so we must
3392 ** fix up the descriptor which had the address
3393 ** clobbered with writeback info.
3396 adapter->mbuf_header_failed++;
3398 /* Is there a reusable buffer? */
3399 mh = rxr->rx_buffers[i].m_head;
3400 if (mh == NULL) /* Nope, init error */
3402 mp = rxr->rx_buffers[i].m_pack;
3403 if (mp == NULL) /* Nope, init error */
3405 /* Get our old mapping */
3406 rxbuf = &rxr->rx_buffers[i];
3407 error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3408 rxbuf->map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3410 /* We really have a problem */
3414 /* Now fix the descriptor as needed */
3415 rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3416 rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
3422 /*********************************************************************
3424 * Allocate memory for rx_buffer structures. Since we use one
3425 * rx_buffer per received packet, the maximum number of rx_buffer's
3426 * that we'll need is equal to the number of receive descriptors
3427 * that we've allocated.
3429 **********************************************************************/
3431 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
3433 struct adapter *adapter = rxr->adapter;
3434 device_t dev = adapter->dev;
3435 struct ixgbe_rx_buf *rxbuf;
3436 int i, bsize, error;
3438 bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
3439 if (!(rxr->rx_buffers =
3440 (struct ixgbe_rx_buf *) malloc(bsize,
3441 M_DEVBUF, M_NOWAIT | M_ZERO))) {
3442 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3448 ** The tag is made to accommodate the largest buffer size
3449 ** with packet split (hence the two segments, even though
3450 ** it may not always use this.
3452 if ((error = bus_dma_tag_create(NULL, /* parent */
3453 1, 0, /* alignment, bounds */
3454 BUS_SPACE_MAXADDR, /* lowaddr */
3455 BUS_SPACE_MAXADDR, /* highaddr */
3456 NULL, NULL, /* filter, filterarg */
3457 MJUM16BYTES, /* maxsize */
3459 MJUMPAGESIZE, /* maxsegsize */
3461 NULL, /* lockfunc */
3462 NULL, /* lockfuncarg */
3464 device_printf(dev, "Unable to create RX DMA tag\n");
3468 /* Create the spare map (used by getbuf) */
3469 error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
3472 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* One DMA map per receive descriptor slot */
3477 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
3478 rxbuf = &rxr->rx_buffers[i];
3479 error = bus_dmamap_create(rxr->rxtag,
3480 BUS_DMA_NOWAIT, &rxbuf->map);
3482 device_printf(dev, "Unable to create RX DMA map\n");
3490 /* Frees all, but can handle partial completion */
3491 ixgbe_free_receive_structures(adapter);
3495 /*********************************************************************
3497 * Initialize a receive ring and its buffers.
3499 **********************************************************************/
3501 ixgbe_setup_receive_ring(struct rx_ring *rxr)
3503 struct adapter *adapter;
3506 struct ixgbe_rx_buf *rxbuf;
3507 struct lro_ctrl *lro = &rxr->lro;
3510 adapter = rxr->adapter;
3514 /* Clear the ring contents */
3515 rsize = roundup2(adapter->num_rx_desc *
3516 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
3517 bzero((void *)rxr->rx_base, rsize);
3520 ** Free current RX buffer structs and their mbufs
3522 for (int i = 0; i < adapter->num_rx_desc; i++) {
3523 rxbuf = &rxr->rx_buffers[i];
3524 if (rxbuf->m_head != NULL) {
3525 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3526 BUS_DMASYNC_POSTREAD);
3527 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3528 if (rxbuf->m_head) {
/* Chain header to packet buffer so one m_freem frees both */
3529 rxbuf->m_head->m_next = rxbuf->m_pack;
3530 m_freem(rxbuf->m_head);
3532 rxbuf->m_head = NULL;
3533 rxbuf->m_pack = NULL;
3537 /* Now refresh the mbufs */
3538 for (j = 0; j < adapter->num_rx_desc; j++) {
3539 if (ixgbe_get_buf(rxr, j, IXGBE_CLEAN_ALL) == ENOBUFS) {
3540 rxr->rx_buffers[j].m_head = NULL;
3541 rxr->rx_buffers[j].m_pack = NULL;
3542 rxr->rx_base[j].read.hdr_addr = 0;
3543 rxr->rx_base[j].read.pkt_addr = 0;
3548 /* Setup our descriptor indices */
3549 rxr->next_to_check = 0;
3550 rxr->last_cleaned = 0;
3551 rxr->lro_enabled = FALSE;
3553 /* Use header split if configured */
3554 if (ixgbe_header_split)
3555 rxr->hdr_split = TRUE;
3557 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3558 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3561 ** Now set up the LRO interface, we
3562 ** also only do head split when LRO
3563 ** is enabled, since so often they
3564 ** are undesirable in similar setups.
3566 if (ifp->if_capenable & IFCAP_LRO) {
3567 int err = tcp_lro_init(lro);
3569 INIT_DEBUGOUT("LRO Initialization failed!\n");
3572 INIT_DEBUGOUT("RX LRO Initialized\n");
3573 rxr->lro_enabled = TRUE;
3574 lro->ifp = adapter->ifp;
3581 * We need to clean up any buffers allocated
3582 * so far, 'j' is the failing index.
3584 for (int i = 0; i < j; i++) {
3585 rxbuf = &rxr->rx_buffers[i];
3586 if (rxbuf->m_head != NULL) {
3587 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3588 BUS_DMASYNC_POSTREAD);
3589 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3590 m_freem(rxbuf->m_head);
3591 rxbuf->m_head = NULL;
3597 /*********************************************************************
3599 * Initialize all receive rings.
3601 **********************************************************************/
3603 ixgbe_setup_receive_structures(struct adapter *adapter)
3605 struct rx_ring *rxr = adapter->rx_rings;
/* Initialize each RX ring; bail out on the first failure */
3608 for (j = 0; j < adapter->num_queues; j++, rxr++)
3609 if (ixgbe_setup_receive_ring(rxr))
3615 * Free RX buffers allocated so far, we will only handle
3616 * the rings that completed, the failing case will have
3617 * cleaned up for itself. 'j' failed, so its the terminus.
3619 for (int i = 0; i < j; ++i) {
3620 rxr = &adapter->rx_rings[i];
3621 for (int n = 0; n < adapter->num_rx_desc; n++) {
3622 struct ixgbe_rx_buf *rxbuf;
3623 rxbuf = &rxr->rx_buffers[n];
3624 if (rxbuf->m_head != NULL) {
3625 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3626 BUS_DMASYNC_POSTREAD);
3627 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3628 m_freem(rxbuf->m_head);
3629 rxbuf->m_head = NULL;
/*
 * NOTE(review): this listing is an excerpt with gaps -- the embedded
 * line numbers jump, so closing braces, "} else {" arms and the
 * "(i & 3) == 3" RETA write guard are not visible here.  Code below is
 * kept byte-identical; comments only.
 */
3637 /*********************************************************************
3639 * Setup receive registers and features.
3641 **********************************************************************/
3642 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
/*
 * Program all RX-side hardware state: descriptor ring base/length,
 * SRRCTL buffer sizing (with optional header split), head/tail
 * pointers, and -- for multiqueue -- the RSS redirection table,
 * hash keys and hash field selection.  RX checksum offload bits
 * are set last.
 */
3645 ixgbe_initialize_receive_units(struct adapter *adapter)
3647 struct rx_ring *rxr = adapter->rx_rings;
3648 struct ixgbe_hw *hw = &adapter->hw;
3649 struct ifnet *ifp = adapter->ifp;
3650 u32 bufsz, rxctrl, fctrl, srrctl, rxcsum;
/* NOTE(review): 'reta' has no initializer; its initial garbage is
 * fully shifted out of the 32-bit register after 4 loop iterations,
 * before the first RETA write -- confirm against the full file. */
3651 u32 reta, mrqc = 0, hlreg, random[10];
3655 * Make sure receives are disabled while
3656 * setting up the descriptor ring
3658 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3659 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
3660 rxctrl & ~IXGBE_RXCTRL_RXEN);
3662 /* Enable broadcasts */
3663 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3664 fctrl |= IXGBE_FCTRL_BAM;
3665 fctrl |= IXGBE_FCTRL_DPF;
3666 fctrl |= IXGBE_FCTRL_PMCF;
3667 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3669 /* Set for Jumbo Frames? */
3670 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3671 if (ifp->if_mtu > ETHERMTU) {
3672 hlreg |= IXGBE_HLREG0_JUMBOEN;
3673 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
/* (else arm: standard MTU, 2K clusters -- '} else {' not shown) */
3675 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3676 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3678 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Per-queue ring programming */
3680 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3681 u64 rdba = rxr->rxdma.dma_paddr;
3683 /* Setup the Base and Length of the Rx Descriptor Ring */
3684 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
3685 (rdba & 0x00000000ffffffffULL));
3686 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
3687 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
3688 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3690 /* Set up the SRRCTL register */
3691 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
3692 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3693 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3695 if (rxr->hdr_split) {
3696 /* Use a standard mbuf for the header */
3697 srrctl |= ((IXGBE_RX_HDR <<
3698 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3699 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3700 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3701 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
3702 /* PSRTYPE must be initialized in 82599 */
3703 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3704 IXGBE_PSRTYPE_UDPHDR |
3705 IXGBE_PSRTYPE_IPV4HDR |
3706 IXGBE_PSRTYPE_IPV6HDR;
3707 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
/* (else arm: no header split -> single advanced buffer descriptor) */
3710 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3711 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
3713 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3714 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
3715 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
3718 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
/* RSS setup only applies with multiple queues */
3721 if (adapter->num_queues > 1) {
3725 /* set up random bits */
3726 arc4rand(&random, sizeof(random), 0);
3728 /* Set up the redirection table */
3729 for (i = 0, j = 0; i < 128; i++, j++) {
3730 if (j == adapter->num_queues) j = 0;
3731 reta = (reta << 8) | (j * 0x11);
/* (write happens every 4th entry; '(i & 3) == 3' guard not shown) */
3733 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3736 /* Now fill our hash function seeds */
3737 for (int i = 0; i < 10; i++)
3738 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
3740 /* Perform hash on these packet types */
3741 mrqc = IXGBE_MRQC_RSSEN
3742 | IXGBE_MRQC_RSS_FIELD_IPV4
3743 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3744 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
3745 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3746 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3747 | IXGBE_MRQC_RSS_FIELD_IPV6
3748 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3749 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
3750 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3751 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3753 /* RSS and RX IPP Checksum are mutually exclusive */
3754 rxcsum |= IXGBE_RXCSUM_PCSD;
/* Non-RSS path: enable packet checksum if the stack asked for it */
3757 if (ifp->if_capenable & IFCAP_RXCSUM)
3758 rxcsum |= IXGBE_RXCSUM_PCSD;
3760 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3761 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3763 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3768 /*********************************************************************
3770 * Free all receive rings.
3772 **********************************************************************/
3774 ixgbe_free_receive_structures(struct adapter *adapter)
3776 struct rx_ring *rxr = adapter->rx_rings;
3778 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3779 struct lro_ctrl *lro = &rxr->lro;
3780 ixgbe_free_receive_buffers(rxr);
3781 /* Free LRO memory */
3783 /* Free the ring memory as well */
3784 ixgbe_dma_free(adapter, &rxr->rxdma);
3787 free(adapter->rx_rings, M_DEVBUF);
3790 /*********************************************************************
3792 * Free receive ring data structures
3794 **********************************************************************/
3796 ixgbe_free_receive_buffers(struct rx_ring *rxr)
3798 struct adapter *adapter = NULL;
3799 struct ixgbe_rx_buf *rxbuf = NULL;
3801 INIT_DEBUGOUT("free_receive_buffers: begin");
3802 adapter = rxr->adapter;
3803 if (rxr->rx_buffers != NULL) {
3804 rxbuf = &rxr->rx_buffers[0];
3805 for (int i = 0; i < adapter->num_rx_desc; i++) {
3806 if (rxbuf->map != NULL) {
3807 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3808 BUS_DMASYNC_POSTREAD);
3809 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3810 bus_dmamap_destroy(rxr->rxtag, rxbuf->map);
3812 if (rxbuf->m_head != NULL) {
3813 m_freem(rxbuf->m_head);
3815 rxbuf->m_head = NULL;
3819 if (rxr->rx_buffers != NULL) {
3820 free(rxr->rx_buffers, M_DEVBUF);
3821 rxr->rx_buffers = NULL;
3823 if (rxr->rxtag != NULL) {
3824 bus_dma_tag_destroy(rxr->rxtag);
/*
 * NOTE(review): this listing is an excerpt with gaps -- the embedded
 * line numbers jump, so braces, else-arms, the descriptor-error
 * handling and parts of the refill path are not visible.  Code below
 * is kept byte-identical; comments only.  Do not edit logic without
 * the full file.
 */
3830 /*********************************************************************
3832 * This routine executes in interrupt context. It replenishes
3833 * the mbufs in the descriptor and sends data which has been
3834 * dma'ed into host memory to upper layer.
3836 * We loop at most count times if count is > 0, or until done if
3839 * Return TRUE for more work, FALSE for all clean.
3840 *********************************************************************/
3842 ixgbe_rxeof(struct rx_ring *rxr, int count)
3844 struct adapter *adapter = rxr->adapter;
3845 struct ifnet *ifp = adapter->ifp;
3846 struct lro_ctrl *lro = &rxr->lro;
3847 struct lro_entry *queued;
3850 union ixgbe_adv_rx_desc *cur;
/* Pick up where the last pass left off */
3854 i = rxr->next_to_check;
3855 cur = &rxr->rx_base[i];
3856 staterr = cur->wb.upper.status_error;
/* Nothing ready: drop the lock and report all-clean */
3858 if (!(staterr & IXGBE_RXD_STAT_DD)) {
3859 IXGBE_RX_UNLOCK(rxr);
3864 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3865 BUS_DMASYNC_POSTREAD);
/* Main clean loop: bounded by 'count' and the interface being up */
3867 while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
3868 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3869 struct mbuf *sendmp, *mh, *mp;
3870 u16 hlen, plen, hdr, vtag;
3871 u8 dopayload, accept_frame, eop;
3875 hlen = plen = vtag = 0;
3876 sendmp = mh = mp = NULL;
3878 /* Sync the buffers */
3879 bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[i].map,
3880 BUS_DMASYNC_POSTREAD);
3883 ** The way the hardware is configured to
3884 ** split, it will ONLY use the header buffer
3885 ** when header split is enabled, otherwise we
3886 ** get normal behavior, ie, both header and
3887 ** payload are DMA'd into the payload buffer.
3889 ** The fmp test is to catch the case where a
3890 ** packet spans multiple descriptors, in that
3891 ** case only the first header is valid.
3893 if ((rxr->hdr_split) && (rxr->fmp == NULL)){
3895 wb.lower.lo_dword.hs_rss.hdr_info);
3896 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3897 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3898 if (hlen > IXGBE_RX_HDR)
3899 hlen = IXGBE_RX_HDR;
3900 plen = le16toh(cur->wb.upper.length);
3901 /* Handle the header mbuf */
3902 mh = rxr->rx_buffers[i].m_head;
/* dopayload tells ixgbe_get_buf which of hdr/pkt buffers to refill */
3904 dopayload = IXGBE_CLEAN_HDR;
3906 ** Get the payload length, this
3907 ** could be zero if its a small
3911 mp = rxr->rx_buffers[i].m_pack;
3914 mp->m_flags &= ~M_PKTHDR;
3916 mh->m_flags |= M_PKTHDR;
3917 dopayload = IXGBE_CLEAN_ALL;
3918 rxr->rx_split_packets++;
3919 } else { /* small packets */
3920 mh->m_flags &= ~M_PKTHDR;
3925 ** Either no header split, or a
3926 ** secondary piece of a fragmented
3929 mh = rxr->rx_buffers[i].m_pack;
3930 mh->m_flags |= M_PKTHDR;
3931 mh->m_len = le16toh(cur->wb.upper.length);
3932 dopayload = IXGBE_CLEAN_PKT;
3935 if (staterr & IXGBE_RXD_STAT_EOP) {
/* Dead (never-compiled) reference code imported from the Linux
 * igb driver; kept under the never-defined conversion marker. */
3941 #ifdef IXGBE_IEEE1588
3942 This code needs to be converted to work here
3943 -----------------------------------------------------
3944 if (unlikely(staterr & IXGBE_RXD_STAT_TS)) {
3947 // Create an mtag and set it up
3948 struct skb_shared_hwtstamps *shhwtstamps =
3951 rd32(IXGBE_TSYNCRXCTL) & IXGBE_TSYNCRXCTL_VALID),
3952 "igb: no RX time stamp available for time stamped packet");
3953 regval = rd32(IXGBE_RXSTMPL);
3954 regval |= (u64)rd32(IXGBE_RXSTMPH) << 32;
3955 // Do time conversion from the register
3956 ns = timecounter_cyc2time(&adapter->clock, regval);
3957 clocksync_update(&adapter->sync, ns);
3958 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
3959 shhwtstamps->hwtstamp = ns_to_ktime(ns);
3960 shhwtstamps->syststamp =
3961 clocksync_hw2sys(&adapter->sync, ns);
3965 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
3970 ** Save the vlan id, because get_buf will
3971 ** clobber the writeback descriptor...
3973 vtag = le16toh(cur->wb.upper.vlan);
3974 if (ixgbe_get_buf(rxr, i, dopayload) != 0) {
3978 /* Initial frame - setup */
3979 if (rxr->fmp == NULL) {
3980 mh->m_flags |= M_PKTHDR;
3981 mh->m_pkthdr.len = mh->m_len;
3982 rxr->fmp = mh; /* Store the first mbuf */
3984 if (mp) { /* Add payload if split */
3985 mh->m_pkthdr.len += mp->m_len;
3986 rxr->lmp = mh->m_next;
3989 /* Chain mbuf's together */
3990 mh->m_flags &= ~M_PKTHDR;
3991 rxr->lmp->m_next = mh;
3992 rxr->lmp = rxr->lmp->m_next;
3993 rxr->fmp->m_pkthdr.len += mh->m_len;
3997 rxr->fmp->m_pkthdr.rcvif = ifp;
4000 /* capture data for AIM */
4001 rxr->bytes += rxr->fmp->m_pkthdr.len;
4002 rxr->rx_bytes += rxr->bytes;
4003 if (ifp->if_capenable & IFCAP_RXCSUM)
4004 ixgbe_rx_checksum(staterr, rxr->fmp);
4006 rxr->fmp->m_pkthdr.csum_flags = 0;
4007 if (staterr & IXGBE_RXD_STAT_VP) {
4008 rxr->fmp->m_pkthdr.ether_vtag = vtag;
4009 rxr->fmp->m_flags |= M_VLANTAG;
4011 #if __FreeBSD_version >= 800000
4012 rxr->fmp->m_pkthdr.flowid = curcpu;
4013 rxr->fmp->m_flags |= M_FLOWID;
/* Refill failure path: reuse the already-loaded DMA maps */
4022 /* Reuse loaded DMA map and just update mbuf chain */
4024 mh = rxr->rx_buffers[i].m_head;
4028 mp = rxr->rx_buffers[i].m_pack;
4029 mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
4030 mp->m_data = mp->m_ext.ext_buf;
4032 if (adapter->max_frame_size <=
4033 (MCLBYTES - ETHER_ALIGN))
4034 m_adj(mp, ETHER_ALIGN);
4035 if (rxr->fmp != NULL) {
4036 /* handles the whole chain */
4043 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4044 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4046 rxr->last_cleaned = i; /* for updating tail */
4048 if (++i == adapter->num_rx_desc)
4052 ** Now send up to the stack,
4053 ** note the the value of next_to_check
4054 ** is safe because we keep the RX lock
4057 if (sendmp != NULL) {
4059 ** Send to the stack if:
4060 ** - LRO not enabled, or
4061 ** - no LRO resources, or
4062 ** - lro enqueue fails
4064 if ((!rxr->lro_enabled) ||
4065 ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0))))
4066 (*ifp->if_input)(ifp, sendmp);
4069 /* Get next descriptor */
4070 cur = &rxr->rx_base[i];
4071 staterr = cur->wb.upper.status_error;
4073 rxr->next_to_check = i;
4075 /* Advance the IXGB's Receive Queue "Tail Pointer" */
4076 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned);
4079 * Flush any outstanding LRO work
4081 while (!SLIST_EMPTY(&lro->lro_active)) {
4082 queued = SLIST_FIRST(&lro->lro_active);
4083 SLIST_REMOVE_HEAD(&lro->lro_active, next);
4084 tcp_lro_flush(lro, queued);
4087 IXGBE_RX_UNLOCK(rxr);
4090 ** Leaving with more to clean?
4091 ** then schedule another interrupt.
/* NOTE(review): (u64)(1 << rxr->msix) performs the shift in 32-bit
 * int BEFORE the cast; if msix can reach 31+ this overflows --
 * should be ((u64)1 << rxr->msix).  Confirm msix range in full file. */
4093 if (staterr & IXGBE_RXD_STAT_DD) {
4094 ixgbe_rearm_rx_queues(adapter, (u64)(1 << rxr->msix));
4101 /*********************************************************************
4103 * Verify that the hardware indicated that the checksum is valid.
4104 * Inform the stack about the status of checksum so that stack
4105 * doesn't spend time verifying the checksum.
4107 *********************************************************************/
4109 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp)
4111 u16 status = (u16) staterr;
4112 u8 errors = (u8) (staterr >> 24);
4114 if (status & IXGBE_RXD_STAT_IPCS) {
4116 if (!(errors & IXGBE_RXD_ERR_IPE)) {
4117 /* IP Checksum Good */
4118 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4119 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4122 mp->m_pkthdr.csum_flags = 0;
4124 if (status & IXGBE_RXD_STAT_L4CS) {
4126 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
4127 mp->m_pkthdr.csum_flags |=
4128 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4129 mp->m_pkthdr.csum_data = htons(0xffff);
4137 ** This routine is run via an vlan config EVENT,
4138 ** it enables us to use the HW Filter table since
4139 ** we can get the vlan id. This just creates the
4140 ** entry in the soft version of the VFTA, init will
4141 ** repopulate the real table.
4144 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4146 struct adapter *adapter = ifp->if_softc;
4149 if (ifp->if_softc != arg) /* Not our event */
4152 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4155 index = (vtag >> 5) & 0x7F;
4157 ixgbe_shadow_vfta[index] |= (1 << bit);
4158 ++adapter->num_vlans;
4159 /* Re-init to load the changes */
4160 ixgbe_init(adapter);
4164 ** This routine is run via an vlan
4165 ** unconfig EVENT, remove our entry
4166 ** in the soft vfta.
4169 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4171 struct adapter *adapter = ifp->if_softc;
4174 if (ifp->if_softc != arg)
4177 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4180 index = (vtag >> 5) & 0x7F;
4182 ixgbe_shadow_vfta[index] &= ~(1 << bit);
4183 --adapter->num_vlans;
4184 /* Re-init to load the changes */
4185 ixgbe_init(adapter);
4189 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
4191 struct ixgbe_hw *hw = &adapter->hw;
4196 ** We get here thru init_locked, meaning
4197 ** a soft reset, this has already cleared
4198 ** the VFTA and other state, so if there
4199 ** have been no vlan's registered do nothing.
4201 if (adapter->num_vlans == 0)
4205 ** A soft reset zero's out the VFTA, so
4206 ** we need to repopulate it now.
4208 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
4209 if (ixgbe_shadow_vfta[i] != 0)
4210 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
4211 ixgbe_shadow_vfta[i]);
4213 /* Enable the Filter Table */
4214 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4215 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
4216 ctrl |= IXGBE_VLNCTRL_VFE;
4217 if (hw->mac.type == ixgbe_mac_82598EB)
4218 ctrl |= IXGBE_VLNCTRL_VME;
4219 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
4221 /* On 82599 the VLAN enable is per/queue in RXDCTL */
4222 if (hw->mac.type == ixgbe_mac_82599EB)
4223 for (int i = 0; i < adapter->num_queues; i++) {
4224 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
4225 ctrl |= IXGBE_RXDCTL_VME;
4226 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
4231 ixgbe_enable_intr(struct adapter *adapter)
4233 struct ixgbe_hw *hw = &adapter->hw;
4234 struct tx_ring *txr = adapter->tx_rings;
4235 struct rx_ring *rxr = adapter->rx_rings;
4236 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
4239 /* Enable Fan Failure detection */
4240 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4241 mask |= IXGBE_EIMS_GPI_SDP1;
4243 /* 82599 specific interrupts */
4244 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4245 mask |= IXGBE_EIMS_ECC;
4246 mask |= IXGBE_EIMS_GPI_SDP1;
4247 mask |= IXGBE_EIMS_GPI_SDP2;
4250 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4252 /* With RSS we use auto clear */
4253 if (adapter->msix_mem) {
4254 mask = IXGBE_EIMS_ENABLE_MASK;
4255 /* Dont autoclear Link */
4256 mask &= ~IXGBE_EIMS_OTHER;
4257 mask &= ~IXGBE_EIMS_LSC;
4258 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4262 ** Now enable all queues, this is done seperately to
4263 ** allow for handling the extended (beyond 32) MSIX
4264 ** vectors that can be used by 82599
4266 for (int i = 0; i < adapter->num_queues; i++, rxr++)
4267 ixgbe_enable_queue(adapter, rxr->msix);
4268 for (int i = 0; i < adapter->num_queues; i++, txr++)
4269 ixgbe_enable_queue(adapter, txr->msix);
4271 IXGBE_WRITE_FLUSH(hw);
4277 ixgbe_disable_intr(struct adapter *adapter)
4279 if (adapter->msix_mem)
4280 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4281 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
4282 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
4284 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
4285 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
4286 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
4288 IXGBE_WRITE_FLUSH(&adapter->hw);
4293 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
4297 value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
4304 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
4306 pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
4313 ** Setup the correct IVAR register for a particular MSIX interrupt
4314 ** (yes this is all very magic and confusing :)
4315 ** - entry is the register array entry
4316 ** - vector is the MSIX vector for this queue
4317 ** - type is RX/TX/MISC
4320 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4322 struct ixgbe_hw *hw = &adapter->hw;
4325 vector |= IXGBE_IVAR_ALLOC_VAL;
4327 switch (hw->mac.type) {
4329 case ixgbe_mac_82598EB:
4331 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4333 entry += (type * 64);
4334 index = (entry >> 2) & 0x1F;
4335 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4336 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4337 ivar |= (vector << (8 * (entry & 0x3)));
4338 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4341 case ixgbe_mac_82599EB:
4342 if (type == -1) { /* MISC IVAR */
4343 index = (entry & 1) * 8;
4344 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4345 ivar &= ~(0xFF << index);
4346 ivar |= (vector << index);
4347 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4348 } else { /* RX/TX IVARS */
4349 index = (16 * (entry & 1)) + (8 * type);
4350 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4351 ivar &= ~(0xFF << index);
4352 ivar |= (vector << index);
4353 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4362 ixgbe_configure_ivars(struct adapter *adapter)
4364 struct tx_ring *txr = adapter->tx_rings;
4365 struct rx_ring *rxr = adapter->rx_rings;
4367 for (int i = 0; i < adapter->num_queues; i++, rxr++)
4368 ixgbe_set_ivar(adapter, i, rxr->msix, 0);
4370 for (int i = 0; i < adapter->num_queues; i++, txr++)
4371 ixgbe_set_ivar(adapter, i, txr->msix, 1);
4373 /* For the Link interrupt */
4374 ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
4378 ** ixgbe_sfp_probe - called in the local timer to
4379 ** determine if a port had optics inserted.
4381 static bool ixgbe_sfp_probe(struct adapter *adapter)
4383 struct ixgbe_hw *hw = &adapter->hw;
4384 device_t dev = adapter->dev;
4385 bool result = FALSE;
4387 if ((hw->phy.type == ixgbe_phy_nl) &&
4388 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4389 s32 ret = hw->phy.ops.identify_sfp(hw);
4392 ret = hw->phy.ops.reset(hw);
4393 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4394 device_printf(dev,"Unsupported SFP+ module detected!");
4395 printf(" Reload driver with supported module.\n");
4396 adapter->sfp_probe = FALSE;
4399 device_printf(dev,"SFP+ module detected!\n");
4400 /* We now have supported optics */
4401 adapter->sfp_probe = FALSE;
4409 ** Tasklet handler for MSIX Link interrupts
4410 ** - do outside interrupt since it might sleep
4413 ixgbe_handle_link(void *context, int pending)
4415 struct adapter *adapter = context;
4417 ixgbe_check_link(&adapter->hw,
4418 &adapter->link_speed, &adapter->link_up, 0);
4419 ixgbe_update_link_status(adapter);
4423 ** Tasklet for handling SFP module interrupts
4426 ixgbe_handle_mod(void *context, int pending)
4428 struct adapter *adapter = context;
4429 struct ixgbe_hw *hw = &adapter->hw;
4430 device_t dev = adapter->dev;
4433 err = hw->phy.ops.identify_sfp(hw);
4434 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4436 "Unsupported SFP+ module type was detected.\n");
4439 hw->mac.ops.setup_sfp(hw);
4440 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
4446 ** Tasklet for handling MSF (multispeed fiber) interrupts
4449 ixgbe_handle_msf(void *context, int pending)
4451 struct adapter *adapter = context;
4452 struct ixgbe_hw *hw = &adapter->hw;
4455 if (hw->mac.ops.get_link_capabilities)
4456 hw->mac.ops.get_link_capabilities(hw, &autoneg,
4458 if (hw->mac.ops.setup_link_speed)
4459 hw->mac.ops.setup_link_speed(hw, autoneg, TRUE, TRUE);
4460 ixgbe_check_link(&adapter->hw,
4461 &adapter->link_speed, &adapter->link_up, 0);
4462 ixgbe_update_link_status(adapter);
4466 /**********************************************************************
4468 * Update the board statistics counters.
4470 **********************************************************************/
4472 ixgbe_update_stats_counters(struct adapter *adapter)
4474 struct ifnet *ifp = adapter->ifp;;
4475 struct ixgbe_hw *hw = &adapter->hw;
4476 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4477 u64 total_missed_rx = 0;
4479 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4481 for (int i = 0; i < 8; i++) {
4482 /* missed_rx tallies misses for the gprc workaround */
4483 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(i));
4484 adapter->stats.mpc[i] += missed_rx;
4485 /* Running comprehensive total for stats display */
4486 total_missed_rx += adapter->stats.mpc[i];
4487 if (hw->mac.type == ixgbe_mac_82598EB)
4488 adapter->stats.rnbc[i] +=
4489 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
4492 /* Hardware workaround, gprc counts missed packets */
4493 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4494 adapter->stats.gprc -= missed_rx;
4496 if (hw->mac.type == ixgbe_mac_82599EB) {
4497 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
4498 IXGBE_READ_REG(hw, IXGBE_GORCH); /* clears register */
4499 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
4500 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* clears register */
4501 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
4502 IXGBE_READ_REG(hw, IXGBE_TORH); /* clears register */
4503 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4504 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4506 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4507 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4508 /* 82598 only has a counter in the high register */
4509 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4510 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4511 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4515 * Workaround: mprc hardware is incorrectly counting
4516 * broadcasts, so for now we subtract those.
4518 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4519 adapter->stats.bprc += bprc;
4520 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4521 adapter->stats.mprc -= bprc;
4523 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4524 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4525 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4526 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4527 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4528 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4529 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4530 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4532 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4533 adapter->stats.lxontxc += lxon;
4534 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4535 adapter->stats.lxofftxc += lxoff;
4536 total = lxon + lxoff;
4538 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4539 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4540 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4541 adapter->stats.gptc -= total;
4542 adapter->stats.mptc -= total;
4543 adapter->stats.ptc64 -= total;
4544 adapter->stats.gotc -= total * ETHER_MIN_LEN;
4546 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4547 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4548 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4549 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4550 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4551 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4552 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4553 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4554 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4555 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4558 /* Fill out the OS statistics structure */
4559 ifp->if_ipackets = adapter->stats.gprc;
4560 ifp->if_opackets = adapter->stats.gptc;
4561 ifp->if_ibytes = adapter->stats.gorc;
4562 ifp->if_obytes = adapter->stats.gotc;
4563 ifp->if_imcasts = adapter->stats.mprc;
4564 ifp->if_collisions = 0;
4567 ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
4568 adapter->stats.rlec;
4572 /**********************************************************************
4574 * This routine is called only when ixgbe_display_debug_stats is enabled.
4575 * This routine provides a way to take a look at important statistics
4576 * maintained by the driver and hardware.
4578 **********************************************************************/
4580 ixgbe_print_hw_stats(struct adapter * adapter)
4582 device_t dev = adapter->dev;
4585 device_printf(dev,"Std Mbuf Failed = %lu\n",
4586 adapter->mbuf_defrag_failed);
4587 device_printf(dev,"Missed Packets = %llu\n",
4588 (long long)adapter->stats.mpc[0]);
4589 device_printf(dev,"Receive length errors = %llu\n",
4590 ((long long)adapter->stats.roc +
4591 (long long)adapter->stats.ruc));
4592 device_printf(dev,"Crc errors = %llu\n",
4593 (long long)adapter->stats.crcerrs);
4594 device_printf(dev,"Driver dropped packets = %lu\n",
4595 adapter->dropped_pkts);
4596 device_printf(dev, "watchdog timeouts = %ld\n",
4597 adapter->watchdog_events);
4599 device_printf(dev,"XON Rcvd = %llu\n",
4600 (long long)adapter->stats.lxonrxc);
4601 device_printf(dev,"XON Xmtd = %llu\n",
4602 (long long)adapter->stats.lxontxc);
4603 device_printf(dev,"XOFF Rcvd = %llu\n",
4604 (long long)adapter->stats.lxoffrxc);
4605 device_printf(dev,"XOFF Xmtd = %llu\n",
4606 (long long)adapter->stats.lxofftxc);
4608 device_printf(dev,"Total Packets Rcvd = %llu\n",
4609 (long long)adapter->stats.tpr);
4610 device_printf(dev,"Good Packets Rcvd = %llu\n",
4611 (long long)adapter->stats.gprc);
4612 device_printf(dev,"Good Packets Xmtd = %llu\n",
4613 (long long)adapter->stats.gptc);
4614 device_printf(dev,"TSO Transmissions = %lu\n",
4620 /**********************************************************************
4622 * This routine is called only when em_display_debug_stats is enabled.
4623 * This routine provides a way to take a look at important statistics
4624 * maintained by the driver and hardware.
4626 **********************************************************************/
4628 ixgbe_print_debug_info(struct adapter *adapter)
4630 device_t dev = adapter->dev;
4631 struct rx_ring *rxr = adapter->rx_rings;
4632 struct tx_ring *txr = adapter->tx_rings;
4633 struct ixgbe_hw *hw = &adapter->hw;
4635 device_printf(dev,"Error Byte Count = %u \n",
4636 IXGBE_READ_REG(hw, IXGBE_ERRBC));
4638 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4639 struct lro_ctrl *lro = &rxr->lro;
4640 device_printf(dev,"Queue[%d]: rdh = %d, hw rdt = %d\n",
4641 i, IXGBE_READ_REG(hw, IXGBE_RDH(i)),
4642 IXGBE_READ_REG(hw, IXGBE_RDT(i)));
4643 device_printf(dev,"RX(%d) Packets Received: %lld\n",
4644 rxr->me, (long long)rxr->rx_packets);
4645 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
4646 rxr->me, (long long)rxr->rx_split_packets);
4647 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
4648 rxr->me, (long)rxr->rx_bytes);
4649 device_printf(dev,"RX(%d) IRQ Handled: %lu\n",
4650 rxr->me, (long)rxr->rx_irq);
4651 device_printf(dev,"RX(%d) LRO Queued= %d\n",
4652 rxr->me, lro->lro_queued);
4653 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
4654 rxr->me, lro->lro_flushed);
4657 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4658 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
4659 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
4660 IXGBE_READ_REG(hw, IXGBE_TDT(i)));
4661 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
4662 txr->me, (long)txr->total_packets);
4663 device_printf(dev,"TX(%d) IRQ Handled: %lu\n",
4664 txr->me, (long)txr->tx_irq);
4665 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
4666 txr->me, (long)txr->no_tx_desc_avail);
4669 device_printf(dev,"Link IRQ Handled: %lu\n",
4670 (long)adapter->link_irq);
4675 ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS)
4679 struct adapter *adapter;
4682 error = sysctl_handle_int(oidp, &result, 0, req);
4684 if (error || !req->newptr)
4688 adapter = (struct adapter *) arg1;
4689 ixgbe_print_hw_stats(adapter);
4695 ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS)
4698 struct adapter *adapter;
4701 error = sysctl_handle_int(oidp, &result, 0, req);
4703 if (error || !req->newptr)
4707 adapter = (struct adapter *) arg1;
4708 ixgbe_print_debug_info(adapter);
4714 ** Set flow control using sysctl:
4715 ** Flow control values:
4722 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4725 struct adapter *adapter;
4727 error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);
4732 adapter = (struct adapter *) arg1;
4733 switch (ixgbe_flow_control) {
4734 case ixgbe_fc_rx_pause:
4735 case ixgbe_fc_tx_pause:
4737 adapter->hw.fc.requested_mode = ixgbe_flow_control;
4741 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4744 ixgbe_fc_enable(&adapter->hw, 0);
4749 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
4750 const char *description, int *limit, int value)
4753 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4754 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4755 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4758 #ifdef IXGBE_IEEE1588
4761 ** ixgbe_hwtstamp_ioctl - control hardware time stamping
4763 ** Outgoing time stamping can be enabled and disabled. Play nice and
4764 ** disable it when requested, although it shouldn't case any overhead
4765 ** when no packet needs it. At most one packet in the queue may be
4766 ** marked for time stamping, otherwise it would be impossible to tell
4767 ** for sure to which packet the hardware time stamp belongs.
4769 ** Incoming time stamping has to be configured via the hardware
4770 ** filters. Not all combinations are supported, in particular event
4771 ** type has to be specified. Matching the kind of event packet is
4772 ** not supported, with the exception of "all V2 events regardless of
4777 ixgbe_hwtstamp_ioctl(struct adapter *adapter, struct ifreq *ifr)
4779 struct ixgbe_hw *hw = &adapter->hw;
4780 struct hwtstamp_ctrl *config;
4781 u32 tsync_tx_ctl_bit = IXGBE_TSYNCTXCTL_ENABLED;
4782 u32 tsync_rx_ctl_bit = IXGBE_TSYNCRXCTL_ENABLED;
4783 u32 tsync_rx_ctl_type = 0;
4784 u32 tsync_rx_cfg = 0;
4787 u16 port = 319; /* PTP */
4790 config = (struct hwtstamp_ctrl *) ifr->ifr_data;
4792 /* reserved for future extensions */
4796 switch (config->tx_type) {
4797 case HWTSTAMP_TX_OFF:
4798 tsync_tx_ctl_bit = 0;
4800 case HWTSTAMP_TX_ON:
4801 tsync_tx_ctl_bit = IXGBE_TSYNCTXCTL_ENABLED;
4807 switch (config->rx_filter) {
4808 case HWTSTAMP_FILTER_NONE:
4809 tsync_rx_ctl_bit = 0;
4811 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4812 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
4813 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
4814 case HWTSTAMP_FILTER_ALL:
4816 * register TSYNCRXCFG must be set, therefore it is not
4817 * possible to time stamp both Sync and Delay_Req messages
4818 * => fall back to time stamping all packets
4820 tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_ALL;
4821 config->rx_filter = HWTSTAMP_FILTER_ALL;
4823 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4824 tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_L4_V1;
4825 tsync_rx_cfg = IXGBE_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
4828 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4829 tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_L4_V1;
4830 tsync_rx_cfg = IXGBE_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
4833 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4834 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4835 tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
4836 tsync_rx_cfg = IXGBE_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
4839 config->rx_filter = HWTSTAMP_FILTER_SOME;
4841 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
4842 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
4843 tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
4844 tsync_rx_cfg = IXGBE_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
4847 config->rx_filter = HWTSTAMP_FILTER_SOME;
4849 case HWTSTAMP_FILTER_PTP_V2_EVENT:
4850 case HWTSTAMP_FILTER_PTP_V2_SYNC:
4851 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
4852 tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
4853 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
4860 /* enable/disable TX */
4861 regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
4862 regval = (regval & ~IXGBE_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
4863 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval);
4865 /* enable/disable RX, define which PTP packets are time stamped */
4866 regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
4867 regval = (regval & ~IXGBE_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
4868 regval = (regval & ~0xE) | tsync_rx_ctl_type;
4869 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval);
4870 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCFG, tsync_rx_cfg);
4873 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
4874 * (Ethertype to filter on)
4875 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
4876 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
4878 IXGBE_WRITE_REG(hw, IXGBE_ETQF0, is_l2 ? 0x440088f7 : 0);
4880 /* L4 Queue Filter[0]: only filter by source and destination port */
4881 IXGBE_WRITE_REG(hw, IXGBE_SPQF0, htons(port));
4882 IXGBE_WRITE_REG(hw, IXGBE_IMIREXT(0), is_l4 ?
4883 ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
4884 IXGBE_WRITE_REG(hw, IXGBE_IMIR(0), is_l4 ?
4886 | (0<<16) /* immediate interrupt disabled */
4887 | 0 /* (1<<17) bit cleared: do not bypass
4888 destination port check */)
4890 IXGBE_WRITE_REG(hw, IXGBE_FTQF0, is_l4 ?
4892 | (1<<15) /* VF not compared */
4893 | (1<<27) /* Enable Timestamping */
4894 | (7<<28) /* only source port filter enabled,
4895 source/target address and protocol
4897 : ((1<<15) | (15<<28) /* all mask bits set = filter not
4902 adapter->hwtstamp_ctrl = config;
4904 /* clear TX/RX time stamp registers, just to be sure */
4905 regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
4906 regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
4912 ** ixgbe_read_clock - read raw cycle counter (to be used by time counter)
4914 static cycle_t ixgbe_read_clock(const struct cyclecounter *tc)
4916 struct adapter *adapter =
4917 container_of(tc, struct igb_adapter, cycles);
4918 struct ixgbe_hw *hw = &adapter->hw;
4921 stamp = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
4922 stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32ULL;
4927 #endif /* IXGBE_IEEE1588 */