1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
/*********************************************************************
 *  Set this to one to display debug statistics
 *  (read at runtime by the stats printing paths).
 *********************************************************************/
int ixgbe_display_debug_stats = 0;

/*********************************************************************
 *  Driver version string, reported by ixgbe_probe() in the
 *  device description.
 *********************************************************************/
char ixgbe_driver_version[] = "2.2.0";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgbe_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

/* Matched linearly by ixgbe_probe(); a subvendor/subdevice of 0 acts
 * as a wildcard.  The table covers 82598 and 82599 family devices. */
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	/* required last entry */
/*********************************************************************
 *  Table of branding strings
 *  (indexed by the String Index field of ixgbe_vendor_info_array)
 *********************************************************************/

static char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
/*********************************************************************
 *  Function prototypes
 *********************************************************************/

/* FreeBSD device interface entry points */
static int      ixgbe_probe(device_t);
static int      ixgbe_attach(device_t);
static int      ixgbe_detach(device_t);
static int      ixgbe_shutdown(device_t);

/* Transmit entry points (legacy single-queue and multiqueue) */
static void     ixgbe_start(struct ifnet *);
static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
#if __FreeBSD_version >= 800000
static int      ixgbe_mq_start(struct ifnet *, struct mbuf *);
static int      ixgbe_mq_start_locked(struct ifnet *,
                    struct tx_ring *, struct mbuf *);
static void     ixgbe_qflush(struct ifnet *);

/* Interface control / configuration */
static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixgbe_init(void *);
static int      ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);

/* Resource allocation and hardware setup helpers */
static void     ixgbe_identify_hardware(struct adapter *);
static int      ixgbe_allocate_pci_resources(struct adapter *);
static int      ixgbe_allocate_msix(struct adapter *);
static int      ixgbe_allocate_legacy(struct adapter *);
static int      ixgbe_allocate_queues(struct adapter *);
static int      ixgbe_setup_msix(struct adapter *);
static void     ixgbe_free_pci_resources(struct adapter *);
static void     ixgbe_local_timer(void *);
static void     ixgbe_setup_interface(device_t, struct adapter *);
static void     ixgbe_config_link(struct adapter *);

/* Transmit path structures */
static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
static int      ixgbe_setup_transmit_structures(struct adapter *);
static void     ixgbe_setup_transmit_ring(struct tx_ring *);
static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_free_transmit_structures(struct adapter *);
static void     ixgbe_free_transmit_buffers(struct tx_ring *);

/* Receive path structures */
static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
static int      ixgbe_setup_receive_structures(struct adapter *);
static int      ixgbe_setup_receive_ring(struct rx_ring *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void     ixgbe_free_receive_structures(struct adapter *);
static void     ixgbe_free_receive_buffers(struct rx_ring *);
static void     ixgbe_setup_hw_rsc(struct rx_ring *);

/* Interrupt handling, statistics, and packet processing */
static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static bool     ixgbe_txeof(struct tx_ring *);
static bool     ixgbe_rxeof(struct ix_queue *, int);
static void     ixgbe_rx_checksum(u32, struct mbuf *, u32);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_disable_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_print_hw_stats(struct adapter *);
static void     ixgbe_print_debug_info(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static void     ixgbe_refresh_mbufs(struct rx_ring *, int);
static int      ixgbe_xmit(struct tx_ring *, struct mbuf **);
static int      ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixgbe_dma_malloc(struct adapter *, bus_size_t,
                    struct ixgbe_dma_alloc *, int);
static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
static void     ixgbe_add_rx_process_limit(struct adapter *, const char *,
                    const char *, int *, int);
static bool     ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool     ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void     ixgbe_configure_ivars(struct adapter *);
static u8 *     ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* VLAN hardware filter support */
static void     ixgbe_setup_vlan_hw_support(struct adapter *);
static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);

static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,

/* Support for pluggable optic modules */
static bool     ixgbe_sfp_probe(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void     ixgbe_legacy_irq(void *);

/* The MSI/X Interrupt handlers */
static void     ixgbe_msix_que(void *);
static void     ixgbe_msix_link(void *);

/* Deferred interrupt tasklets */
static void     ixgbe_handle_que(void *, int);
static void     ixgbe_handle_link(void *, int);
static void     ixgbe_handle_msf(void *, int);
static void     ixgbe_handle_mod(void *, int);

/* Flow Director (82599 packet filtering) support */
static void     ixgbe_atr(struct tx_ring *, struct mbuf *);
static void     ixgbe_reinit_fdir(void *, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixgbe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),

/* "ix" is the interface/unit name; softc is the per-device adapter. */
static driver_t ixgbe_driver = {
	"ix", ixgbe_methods, sizeof(struct adapter),

static devclass_t ixgbe_devclass;
/* Register the driver on the PCI bus with no module event handler. */
DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);

MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixgbe_enable_aim = TRUE;
TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 128;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);

/*
** Smart speed setting, default to on
** this only works as a compile option
** right now as its during attach, set
** this to 'ixgbe_smart_speed_off' to
** disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);

/*
 * Header split has seemed to be beneficial in
 * most circumstances tested, so its on by default
 * however this variable will allow it to be disabled
 * for some debug purposes.
 */
static bool ixgbe_header_split = FALSE;
TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus. Each queue is a pair
 * of RX and TX rings with a msix vector
 */
static int ixgbe_num_queues = 0;
TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

/*
** Shadow VFTA table, this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixgbe_shadow_vfta[IXGBE_VFTA_SIZE];

/*
** The number of scatter-gather segments
** differs for 82598 and 82599, default to
** the 82598 value; attach bumps it for 82599.
*/
static int ixgbe_num_segs = IXGBE_82598_SCATTER;

/*
** For Flow Director: this is the
** number of TX packets we sample
** for the filter pool, this means
** every 20th packet will be probed.
**
** This feature can be disabled by
** setting this to 0.
*/
static int atr_sample_rate = 20;

/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool, this variable controls
** how much it takes:
** 0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

ixgbe_probe(device_t dev)
	ixgbe_vendor_info_t *ent;

	u16 pci_vendor_id = 0;
	u16 pci_device_id = 0;
	u16 pci_subvendor_id = 0;
	u16 pci_subdevice_id = 0;
	char adapter_name[256];

	INIT_DEBUGOUT("ixgbe_probe: begin");

	/* Cheap early reject: not an Intel device */
	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	/* Linear scan of the ID table; 0 subvendor/subdevice matches any */
	ent = ixgbe_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			/* Match: publish "<branding string>, Version - <ver>" */
			sprintf(adapter_name, "%s, Version - %s",
			    ixgbe_strings[ent->index],
			    ixgbe_driver_version);
			device_set_desc_copy(dev, adapter_name);
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

ixgbe_attach(device_t dev)
	struct adapter *adapter;
	u16 pci_device_id, csum;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;

	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Keep track of optics: map device ID to the ifmedia type,
	 * and switch to the larger 82599 scatter count where needed. */
	pci_device_id = pci_get_device(dev);
	switch (pci_device_id) {
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
	case IXGBE_DEV_ID_82598EB_CX4:
		adapter->optics = IFM_10G_CX4;
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
	case IXGBE_DEV_ID_82598AT:
		adapter->optics = IFM_10G_SR;
	case IXGBE_DEV_ID_82598AT2:
		adapter->optics = IFM_10G_T;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		adapter->optics = IFM_10G_LR;
	case IXGBE_DEV_ID_82599_SFP:
		adapter->optics = IFM_10G_SR;
		ixgbe_num_segs = IXGBE_82599_SCATTER;
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT :
		adapter->optics = IFM_10G_TWINAX;
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_82599_CX4:
		adapter->optics = IFM_10G_CX4;
		ixgbe_num_segs = IXGBE_82599_SCATTER;
	case IXGBE_DEV_ID_82599_XAUI_LOM:
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		ixgbe_num_segs = IXGBE_82599_SCATTER;
	case IXGBE_DEV_ID_82599_T3_LOM:
		ixgbe_num_segs = IXGBE_82599_SCATTER;
		adapter->optics = IFM_10G_T;

	/* Per-device sysctl nodes: statistics, debug dump, flow control,
	 * and a knob for adaptive interrupt moderation. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_sysctl_debug, "I", "Debug Info");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
	    &ixgbe_enable_aim, 1, "Interrupt Moderation");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");

	/* Do descriptor calc and sanity checks: the ring byte size must be
	 * DBA_ALIGN-aligned and the count within [MIN_TXD, MAX_TXD]. */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0 ) {
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;

	/* NOTE(review): the bounds below compare ixgbe_rxd against the TX
	 * limits (MIN_TXD/MAX_TXD) — looks like a copy/paste; confirm
	 * against MIN_RXD/MAX_RXD in the header. */
	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_TXD || ixgbe_rxd > MAX_TXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {

	/* Initialize the shared code */
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port, set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,"Unsupported SFP+ module detected!\n");
		device_printf(dev,"Unable to initialize the shared code\n");

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		device_printf(dev,"The EEPROM Checksum Is Not Valid\n");

	/* Pick up the smart speed setting (82599 only) */
	if (hw->mac.type == ixgbe_mac_82599EB)
		hw->phy.smart_speed = ixgbe_smart_speed;

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.low_water = IXGBE_FC_LO;
	hw->fc.high_water = IXGBE_FC_HI;
	hw->fc.send_xon = TRUE;

	error = ixgbe_init_hw(hw);
	if (error == IXGBE_ERR_EEPROM_VERSION) {
		/* Pre-production hardware: warn but continue the attach */
		device_printf(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\n If you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
		device_printf(dev,"Unsupported SFP+ Module\n");
		device_printf(dev,"Hardware Initialization Failure\n");

	/* Hook interrupts: MSIX when multiple vectors granted, else legacy */
	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter);
		error = ixgbe_allocate_legacy(adapter);

	/* Setup OS specific network interface */
	ixgbe_setup_interface(dev, adapter);

	/* Sysctl for limiting the amount of work done in the taskqueue */
	ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    ixgbe_rx_process_limit);

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/* Print PCIE bus type/speed/width info */
	ixgbe_get_bus_info(hw);
	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :

	/* Warn when the slot cannot feed the NIC at full rate */
	if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
	    (hw->bus.speed == ixgbe_bus_speed_2500)) {
		device_printf(dev, "PCI-Express bandwidth available"
		    " for this card\n is not sufficient for"
		    " optimal performance.\n");
		device_printf(dev, "For optimal performance a x8 "
		    "PCIE, or x4 PCIE 2 slot is required.\n");

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	INIT_DEBUGOUT("ixgbe_attach: end");

	/* Error unwind: release whatever was allocated above */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	ixgbe_free_pci_resources(adapter);
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

ixgbe_detach(device_t dev)
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");

	/* Stop the adapter under the core lock */
	IXGBE_CORE_LOCK(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free the per-queue taskqueues */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		taskqueue_drain(que->tq, &que->que_task);
		taskqueue_free(que->tq);

	/* Drain the Link queue */
	taskqueue_drain(adapter->tq, &adapter->link_task);
	taskqueue_drain(adapter->tq, &adapter->mod_task);
	taskqueue_drain(adapter->tq, &adapter->msf_task);
	taskqueue_drain(adapter->tq, &adapter->fdir_task);
	taskqueue_free(adapter->tq);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Tear down the ifnet and release remaining resources */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);
/*********************************************************************
 *
 *  Shutdown entry point
 *  (stops the adapter under the core lock at system shutdown)
 *
 **********************************************************************/

ixgbe_shutdown(device_t dev)
	struct adapter *adapter = device_get_softc(dev);
	IXGBE_CORE_LOCK(adapter);
	IXGBE_CORE_UNLOCK(adapter);
/*********************************************************************
 *  Transmit entry point
 *
 *  ixgbe_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available stack is notified and
 *  the packet is requeued.
 **********************************************************************/

ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
	struct adapter *adapter = txr->adapter;

	IXGBE_TX_LOCK_ASSERT(txr);

	/* Bail unless running and not already marked active */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	if (!adapter->link_active)

	/* Dequeue and transmit until the send queue is empty */
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);

		if (ixgbe_xmit(txr, &m_head)) {
			/* No descriptors: mark busy and push the mbuf back */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
ixgbe_start(struct ifnet *ifp)
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ixgbe_start_locked(txr, ifp);
		IXGBE_TX_UNLOCK(txr);
#if __FreeBSD_version >= 800000
/*
** Multiqueue Transmit driver
** (dispatches the mbuf to a tx ring chosen by flow id)
*/
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
	struct adapter *adapter = ifp->if_softc;

	/* Which queue to use: hash flow-tagged packets across rings */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;

	txr = &adapter->tx_rings[i];

	/* Transmit directly if the ring lock is free, else enqueue */
	if (IXGBE_TX_TRYLOCK(txr)) {
		err = ixgbe_mq_start_locked(ifp, txr, m);
		IXGBE_TX_UNLOCK(txr);
		err = drbr_enqueue(ifp, txr->br, m);
/*
 * Multiqueue transmit with the ring lock held: drains the buf_ring,
 * handing frames to ixgbe_xmit and re-enqueueing on failure.
 */
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
	struct adapter *adapter = txr->adapter;
	int enqueued, err = 0;

	/* Not running / link down: just buffer the frame */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || adapter->link_active == 0) {
		err = drbr_enqueue(ifp, txr->br, m);

		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		/* Preserve ordering: enqueue first, then drain */
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
		next = drbr_dequeue(ifp, txr->br);

	/* Process the queue */
	while (next != NULL) {
		if ((err = ixgbe_xmit(txr, &next)) != 0) {
			/* Descriptor shortage: put the frame back */
			err = drbr_enqueue(ifp, txr->br, next);
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		next = drbr_dequeue(ifp, txr->br);

	/* Set watchdog on */
	txr->watchdog_check = TRUE;
	txr->watchdog_time = ticks;
/*
** Flush all ring buffers
** (frees every mbuf still queued on each tx ring's buf_ring)
*/
ixgbe_qflush(struct ifnet *ifp)
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
		IXGBE_TX_UNLOCK(txr);
#endif /* __FreeBSD_version >= 800000 */
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface (MTU, flags, capabilities, multicast, media).
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;

		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		/* Reject MTUs exceeding the hardware's max frame size */
		if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
		IXGBE_CORE_LOCK(adapter);
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		error = ixgbe_init_locked(adapter);
		IXGBE_CORE_UNLOCK(adapter);

		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/* Only promisc/allmulti changed: adjust
				 * filters without a full reinit */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_disable_promisc(adapter);
					ixgbe_set_promisc(adapter);
				error = ixgbe_init_locked(adapter);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);

		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Reprogram the multicast table with intrs masked */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);

		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);

		/* Toggle requested capability bits, then reinit if running */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			error = ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		VLAN_CAPABILITIES(ifp);

		/* Anything else falls through to the generic handler */
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
984 /*********************************************************************
987 * This routine is used in two ways. It is used by the stack as
988 * init entry point in network interface structure. It is also used
989 * by the driver as a hw/sw initialization routine to get to a
992 * return 0 on success, positive on failure
993 **********************************************************************/
994 #define IXGBE_MHADD_MFS_SHIFT 16
997 ixgbe_init_locked(struct adapter *adapter)
999 struct ifnet *ifp = adapter->ifp;
1000 device_t dev = adapter->dev;
1001 struct ixgbe_hw *hw = &adapter->hw;
1002 u32 k, txdctl, mhadd, gpie;
1005 mtx_assert(&adapter->core_mtx, MA_OWNED);
1006 INIT_DEBUGOUT("ixgbe_init: begin");
1008 hw->adapter_stopped = FALSE;
1009 ixgbe_stop_adapter(hw);
1010 callout_stop(&adapter->timer);
1012 /* reprogram the RAR[0] in case user changed it. */
1013 ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1015 /* Get the latest mac address, User can use a LAA */
1016 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
1017 IXGBE_ETH_LENGTH_OF_ADDRESS);
1018 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
1019 hw->addr_ctrl.rar_used_count = 1;
1021 /* Prepare transmit descriptors and buffers */
1022 if (ixgbe_setup_transmit_structures(adapter)) {
1023 device_printf(dev,"Could not setup transmit structures\n");
1024 ixgbe_stop(adapter);
1029 ixgbe_initialize_transmit_units(adapter);
1031 /* Setup Multicast table */
1032 ixgbe_set_multi(adapter);
1035 ** Determine the correct mbuf pool
1036 ** for doing jumbo/headersplit
1038 if (ifp->if_mtu > ETHERMTU)
1039 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1041 adapter->rx_mbuf_sz = MCLBYTES;
1043 /* Prepare receive descriptors and buffers */
1044 if (ixgbe_setup_receive_structures(adapter)) {
1045 device_printf(dev,"Could not setup receive structures\n");
1046 ixgbe_stop(adapter);
1050 /* Configure RX settings */
1051 ixgbe_initialize_receive_units(adapter);
1053 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
1055 if (hw->mac.type == ixgbe_mac_82599EB) {
1056 gpie |= IXGBE_SDP1_GPIEN;
1057 gpie |= IXGBE_SDP2_GPIEN;
1060 /* Enable Fan Failure Interrupt */
1061 if (hw->device_id == IXGBE_DEV_ID_82598AT)
1062 gpie |= IXGBE_SDP1_GPIEN;
1064 if (adapter->msix > 1) {
1065 /* Enable Enhanced MSIX mode */
1066 gpie |= IXGBE_GPIE_MSIX_MODE;
1067 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1070 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1072 /* Set the various hardware offload abilities */
1073 ifp->if_hwassist = 0;
1074 if (ifp->if_capenable & IFCAP_TSO4)
1075 ifp->if_hwassist |= CSUM_TSO;
1076 if (ifp->if_capenable & IFCAP_TXCSUM)
1077 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1078 #if __FreeBSD_version >= 800000
1079 if (hw->mac.type == ixgbe_mac_82599EB)
1080 ifp->if_hwassist |= CSUM_SCTP;
1083 if (ifp->if_mtu > ETHERMTU) {
1084 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1085 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1086 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1087 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1090 /* Now enable all the queues */
1092 for (int i = 0; i < adapter->num_queues; i++) {
1093 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1094 txdctl |= IXGBE_TXDCTL_ENABLE;
1095 /* Set WTHRESH to 8, burst writeback */
1096 txdctl |= (8 << 16);
1097 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1100 for (int i = 0; i < adapter->num_queues; i++) {
1101 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1102 if (hw->mac.type == ixgbe_mac_82598EB) {
1108 rxdctl &= ~0x3FFFFF;
1111 rxdctl |= IXGBE_RXDCTL_ENABLE;
1112 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1113 for (k = 0; k < 10; k++) {
1114 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1115 IXGBE_RXDCTL_ENABLE)
1121 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1124 /* Set up VLAN offloads and filter */
1125 ixgbe_setup_vlan_hw_support(adapter);
1127 /* Enable Receive engine */
1128 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1129 if (hw->mac.type == ixgbe_mac_82598EB)
1130 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1131 rxctrl |= IXGBE_RXCTRL_RXEN;
1132 ixgbe_enable_rx_dma(hw, rxctrl);
1134 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1136 /* Set up MSI/X routing */
1137 if (ixgbe_enable_msix) {
1138 ixgbe_configure_ivars(adapter);
1139 /* Set up auto-mask */
1140 if (hw->mac.type == ixgbe_mac_82598EB)
1141 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1143 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1144 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1146 } else { /* Simple settings for Legacy/MSI */
1147 ixgbe_set_ivar(adapter, 0, 0, 0);
1148 ixgbe_set_ivar(adapter, 0, 0, 1);
1149 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1153 /* Init Flow director */
1154 if (hw->mac.type == ixgbe_mac_82599EB)
1155 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1159 ** Check on any SFP devices that
1160 ** need to be kick-started
1162 if (hw->phy.type == ixgbe_phy_none) {
1163 int err = hw->phy.ops.identify(hw);
1164 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1166 "Unsupported SFP+ module type was detected.\n");
1171 /* Set moderation on the Link interrupt */
1172 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
1174 /* Config/Enable Link */
1175 ixgbe_config_link(adapter);
1177 /* And now turn on interrupts */
1178 ixgbe_enable_intr(adapter);
1180 /* Now inform the stack we're ready */
1181 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1182 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * if_init entry point: take the core lock and run the locked
 * init path (ixgbe_init_locked) so stack-initiated resets are
 * serialized against the driver's own users of the core mutex.
 */
1188 ixgbe_init(void *arg)
1190 struct adapter *adapter = arg;
1192 IXGBE_CORE_LOCK(adapter);
1193 ixgbe_init_locked(adapter);
1194 IXGBE_CORE_UNLOCK(adapter);
1201 ** MSIX Interrupt Handlers and Tasklets
1206 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1208 struct ixgbe_hw *hw = &adapter->hw;
1209 u64 queue = (u64)(1 << vector);
1212 if (hw->mac.type == ixgbe_mac_82598EB) {
1213 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1214 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1216 mask = (queue & 0xFFFFFFFF);
1218 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1219 mask = (queue >> 32);
1221 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1226 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1228 struct ixgbe_hw *hw = &adapter->hw;
1229 u64 queue = (u64)(1 << vector);
1232 if (hw->mac.type == ixgbe_mac_82598EB) {
1233 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1234 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1236 mask = (queue & 0xFFFFFFFF);
1238 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1239 mask = (queue >> 32);
1241 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * Software-trigger interrupts for the queues named in the 64-bit
 * bitmap via EICS (interrupt cause set). Caller supplies the mask
 * already widened to u64, so no shift hazard here.
 */
1246 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
1250 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1251 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
1252 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
/* 82599: cause-set register is split into two 32-bit halves */
1254 mask = (queues & 0xFFFFFFFF);
1255 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
1256 mask = (queues >> 32);
1257 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
/*
 * Deferred (taskqueue) per-queue service routine: drain RX, restart
 * TX, then re-enable the queue's interrupt. Runs only while the
 * interface is marked RUNNING.
 */
1263 ixgbe_handle_que(void *context, int pending)
1265 struct ix_queue *que = context;
1266 struct adapter *adapter = que->adapter;
1267 struct tx_ring *txr = que->txr;
1268 struct ifnet *ifp = adapter->ifp;
1271 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1272 more = ixgbe_rxeof(que, adapter->rx_process_limit);
/* Kick the transmit path: multiqueue (buf_ring) on 8.x+, legacy if_snd otherwise */
1275 #if __FreeBSD_version >= 800000
1276 if (!drbr_empty(ifp, txr->br))
1277 ixgbe_mq_start_locked(ifp, txr, NULL);
1279 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1280 ixgbe_start_locked(txr, ifp);
1282 IXGBE_TX_UNLOCK(txr);
/* NOTE(review): this enqueue is presumably guarded by "if (more)" on a line not shown — confirm */
1284 taskqueue_enqueue(que->tq, &que->que_task);
1289 /* Reenable this interrupt */
1290 ixgbe_enable_queue(adapter, que->msix);
1295 /*********************************************************************
1297 * Legacy Interrupt Service routine
1299 **********************************************************************/
/*
 * Legacy (INTx) interrupt handler: reads EICR once to latch the
 * cause, services RX and (loop-bounded) TX, defers leftover work to
 * the queue task, and checks side causes (fan failure, link change)
 * before re-enabling interrupts.
 */
1302 ixgbe_legacy_irq(void *arg)
1304 struct ix_queue *que = arg;
1305 struct adapter *adapter = que->adapter;
1306 struct ixgbe_hw *hw = &adapter->hw;
1307 struct tx_ring *txr = adapter->tx_rings;
1308 bool more_tx, more_rx;
1309 u32 reg_eicr, loop = MAX_LOOP;
1312 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
/* Spurious/shared interrupt: nothing latched, just unmask and return */
1315 if (reg_eicr == 0) {
1316 ixgbe_enable_intr(adapter);
1320 more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
/* Bound TX cleanup by MAX_LOOP iterations so we cannot spin here */
1324 more_tx = ixgbe_txeof(txr);
1325 } while (loop-- && more_tx);
1326 IXGBE_TX_UNLOCK(txr);
1328 if (more_rx || more_tx)
1329 taskqueue_enqueue(que->tq, &que->que_task);
1331 /* Check for fan failure */
1332 if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1333 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1334 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1335 "REPLACE IMMEDIATELY!!\n");
1336 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
1339 /* Link status change */
1340 if (reg_eicr & IXGBE_EICR_LSC)
1341 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1343 ixgbe_enable_intr(adapter);
1348 /*********************************************************************
1350 * MSI Queue Interrupt Service routine
1352 **********************************************************************/
/*
 * Per-queue MSIX interrupt handler. Masks its own vector, services
 * TX/RX, then either defers remaining work to the taskqueue or
 * re-enables the vector. Also computes Adaptive Interrupt Moderation
 * (AIM): a new EITR value derived from average bytes/packet over the
 * last interval.
 */
1354 ixgbe_msix_que(void *arg)
1356 struct ix_queue *que = arg;
1357 struct adapter *adapter = que->adapter;
1358 struct tx_ring *txr = que->txr;
1359 struct rx_ring *rxr = que->rxr;
1360 bool more_tx, more_rx;
/* Mask this vector for the duration of servicing */
1363 ixgbe_disable_queue(adapter, que->msix);
1366 more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
1369 more_tx = ixgbe_txeof(txr);
1370 IXGBE_TX_UNLOCK(txr);
/* Second RX pass after TX cleanup */
1372 more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
/* AIM disabled by tunable: skip straight to the deferral decision */
1376 if (ixgbe_enable_aim == FALSE)
1379 ** Do Adaptive Interrupt Moderation:
1380 ** - Write out last calculated setting
1381 ** - Calculate based on average size over
1382 ** the last interval.
1384 if (que->eitr_setting)
1385 IXGBE_WRITE_REG(&adapter->hw,
1386 IXGBE_EITR(que->msix), que->eitr_setting);
1388 que->eitr_setting = 0;
1390 /* Idle, do nothing */
1391 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* newitr = max average frame size seen on either ring this interval */
1394 if ((txr->bytes) && (txr->packets))
1395 newitr = txr->bytes/txr->packets;
1396 if ((rxr->bytes) && (rxr->packets))
1397 newitr = max(newitr,
1398 (rxr->bytes / rxr->packets));
1399 newitr += 24; /* account for hardware frame, crc */
1401 /* set an upper boundary */
1402 newitr = min(newitr, 3000);
1404 /* Be nice to the mid range */
1405 if ((newitr > 300) && (newitr < 1200))
1406 newitr = (newitr / 3);
1408 newitr = (newitr / 2);
/* 82598 wants the interval mirrored into the upper 16 bits; 82599 uses the counter-write-disable bit */
1410 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1411 newitr |= newitr << 16;
1413 newitr |= IXGBE_EITR_CNT_WDIS;
1415 /* save for next interrupt */
1416 que->eitr_setting = newitr;
1425 if (more_tx || more_rx)
1426 taskqueue_enqueue(que->tq, &que->que_task);
1427 else /* Reenable this interrupt */
1428 ixgbe_enable_queue(adapter, que->msix);
/*
 * MSIX "other causes" (link) vector handler: dispatches link state
 * change, flow-director reinit, ECC error, and SFP module/multispeed
 * events to their tasklets, and checks for fan failure on 82598AT.
 * Re-enables only the OTHER cause mask on exit.
 */
1434 ixgbe_msix_link(void *arg)
1436 struct adapter *adapter = arg;
1437 struct ixgbe_hw *hw = &adapter->hw;
1440 ++adapter->link_irq;
1442 /* First get the cause */
1443 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1444 /* Clear interrupt with write */
1445 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1447 /* Link status change */
1448 if (reg_eicr & IXGBE_EICR_LSC)
1449 taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* 82599-only causes below */
1451 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1453 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1454 /* This is probably overkill :) */
/* Guard against concurrent fdir reinit via atomic test-and-set */
1455 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1457 /* Clear the interrupt */
1458 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1459 /* Turn off the interface */
1460 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1461 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1464 if (reg_eicr & IXGBE_EICR_ECC) {
1465 device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1466 "Please Reboot!!\n");
1467 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
/* SDP1 = multispeed fiber event, SDP2 = SFP module insert/remove */
1470 if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
1471 /* Clear the interrupt */
1472 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1473 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1474 } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
1475 /* Clear the interrupt */
1476 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1477 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1481 /* Check for fan failure */
1482 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1483 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1484 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1485 "REPLACE IMMEDIATELY!!\n");
1486 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1489 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1493 /*********************************************************************
1495 * Media Ioctl callback
1497 * This routine is called whenever the user queries the status of
1498 * the interface using ifconfig.
1500 **********************************************************************/
/*
 * ifmedia status callback (ifconfig query): refresh link state under
 * the core lock and report ACTIVE plus the negotiated speed/duplex.
 */
1502 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1504 struct adapter *adapter = ifp->if_softc;
1506 INIT_DEBUGOUT("ixgbe_media_status: begin");
1507 IXGBE_CORE_LOCK(adapter);
1508 ixgbe_update_link_status(adapter);
1510 ifmr->ifm_status = IFM_AVALID;
1511 ifmr->ifm_active = IFM_ETHER;
/* No link: report only "valid" status and bail */
1513 if (!adapter->link_active) {
1514 IXGBE_CORE_UNLOCK(adapter);
1518 ifmr->ifm_status |= IFM_ACTIVE;
1520 switch (adapter->link_speed) {
1521 case IXGBE_LINK_SPEED_1GB_FULL:
1522 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
/* 10G media subtype depends on the detected optics */
1524 case IXGBE_LINK_SPEED_10GB_FULL:
1525 ifmr->ifm_active |= adapter->optics | IFM_FDX;
1529 IXGBE_CORE_UNLOCK(adapter);
1534 /*********************************************************************
1536 * Media Ioctl callback
1538 * This routine is called when the user changes speed/duplex using
1539 * media/mediopt option with ifconfig.
1541 **********************************************************************/
/*
 * ifmedia change callback: only IFM_ETHER auto-select is accepted;
 * auto advertises both 1G and 10G full duplex. Any other subtype is
 * rejected with a console message.
 */
1543 ixgbe_media_change(struct ifnet * ifp)
1545 struct adapter *adapter = ifp->if_softc;
1546 struct ifmedia *ifm = &adapter->media;
1548 INIT_DEBUGOUT("ixgbe_media_change: begin");
1550 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1553 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1555 adapter->hw.phy.autoneg_advertised =
1556 IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
1559 device_printf(adapter->dev, "Only auto media type\n");
1566 /*********************************************************************
1568 * This routine maps the mbufs to tx descriptors, allowing the
1569 * TX engine to transmit the packets.
1570 * - return 0 on success, positive on failure
1572 **********************************************************************/
/*
 * Map one mbuf chain onto advanced TX descriptors and hand it to the
 * hardware. Returns 0 on success, positive errno on failure; on
 * EFBIG the chain is defragmented once and the DMA load retried.
 * Caller holds the TX ring lock.
 */
1575 ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1577 struct adapter *adapter = txr->adapter;
1578 u32 olinfo_status = 0, cmd_type_len;
1580 int i, j, error, nsegs;
1581 int first, last = 0;
1582 struct mbuf *m_head;
1583 bus_dma_segment_t segs[ixgbe_num_segs];
1585 struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
1586 union ixgbe_adv_tx_desc *txd = NULL;
1590 /* Basic descriptor defines */
1591 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1592 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1594 if (m_head->m_flags & M_VLANTAG)
1595 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1598 * Important to capture the first descriptor
1599 * used because it will contain the index of
1600 * the one we tell the hardware to report back
1602 first = txr->next_avail_desc;
1603 txbuf = &txr->tx_buffers[first];
1604 txbuf_mapped = txbuf;
1608 * Map the packet for DMA.
1610 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1611 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
/* EFBIG: too many segments — defragment the chain once and retry the load */
1613 if (error == EFBIG) {
1616 m = m_defrag(*m_headp, M_DONTWAIT);
1618 adapter->mbuf_defrag_failed++;
1626 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1627 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1629 if (error == ENOMEM) {
1630 adapter->no_tx_dma_setup++;
1632 } else if (error != 0) {
1633 adapter->no_tx_dma_setup++;
1638 } else if (error == ENOMEM) {
1639 adapter->no_tx_dma_setup++;
1641 } else if (error != 0) {
1642 adapter->no_tx_dma_setup++;
1648 /* Make certain there are enough descriptors */
1649 if (nsegs > txr->tx_avail - 2) {
1650 txr->no_desc_avail++;
1657 ** Set up the appropriate offload context
1658 ** this becomes the first descriptor of
/* TSO needs a context descriptor and sets payload length explicitly */
1661 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1662 if (ixgbe_tso_setup(txr, m_head, &paylen)) {
1663 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1664 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1665 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1666 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1670 } else if (ixgbe_tx_ctx_setup(txr, m_head))
1671 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1673 #ifdef IXGBE_IEEE1588
1674 /* This is changing soon to an mtag detection */
1675 if (we detect this mbuf has a TSTAMP mtag)
1676 cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
1680 /* Do the flow director magic */
1681 if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
1683 if (txr->atr_count >= atr_sample_rate) {
1684 ixgbe_atr(txr, m_head);
1689 /* Record payload length */
1691 olinfo_status |= m_head->m_pkthdr.len <<
1692 IXGBE_ADVTXD_PAYLEN_SHIFT;
/* Fill one advanced descriptor per DMA segment; ring index wraps at num_tx_desc */
1694 i = txr->next_avail_desc;
1695 for (j = 0; j < nsegs; j++) {
1699 txbuf = &txr->tx_buffers[i];
1700 txd = &txr->tx_base[i];
1701 seglen = segs[j].ds_len;
1702 segaddr = htole64(segs[j].ds_addr);
1704 txd->read.buffer_addr = segaddr;
1705 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1706 cmd_type_len |seglen);
1707 txd->read.olinfo_status = htole32(olinfo_status);
1708 last = i; /* descriptor that will get completion IRQ */
1710 if (++i == adapter->num_tx_desc)
1713 txbuf->m_head = NULL;
1714 txbuf->eop_index = -1;
/* Last descriptor carries EOP and requests status writeback (RS) */
1717 txd->read.cmd_type_len |=
1718 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1719 txr->tx_avail -= nsegs;
1720 txr->next_avail_desc = i;
1722 txbuf->m_head = m_head;
1724 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1726 /* Set the index of the descriptor that will be marked done */
1727 txbuf = &txr->tx_buffers[first];
1728 txbuf->eop_index = last;
1730 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1731 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1733 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1734 * hardware that this frame is available to transmit.
1736 ++txr->total_packets;
1737 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
1739 /* Do a clean if descriptors are low */
1740 if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD)
/* Error path: release the DMA map taken at load time */
1746 bus_dmamap_unload(txr->txtag, txbuf->map);
/*
 * Program FCTRL from the interface flags: IFF_PROMISC sets both
 * unicast (UPE) and multicast (MPE) promiscuous bits; IFF_ALLMULTI
 * sets MPE only and clears UPE.
 */
1752 ixgbe_set_promisc(struct adapter *adapter)
1756 struct ifnet *ifp = adapter->ifp;
1758 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1760 if (ifp->if_flags & IFF_PROMISC) {
1761 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1762 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1763 } else if (ifp->if_flags & IFF_ALLMULTI) {
1764 reg_rctl |= IXGBE_FCTRL_MPE;
1765 reg_rctl &= ~IXGBE_FCTRL_UPE;
1766 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
/*
 * Clear both promiscuous bits (UPE and MPE) in FCTRL, returning the
 * receive filter to normal perfect/multicast filtering.
 */
1772 ixgbe_disable_promisc(struct adapter * adapter)
1776 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1778 reg_rctl &= (~IXGBE_FCTRL_UPE);
1779 reg_rctl &= (~IXGBE_FCTRL_MPE);
1780 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1786 /*********************************************************************
1789 * This routine is called whenever multicast address list is updated.
1791 **********************************************************************/
1792 #define IXGBE_RAR_ENTRIES 16
/*
 * Rebuild the multicast filter: set FCTRL according to the interface
 * flags, then walk the interface's multicast list into a flat byte
 * array and hand it to the shared code via the iterator callback.
 */
1795 ixgbe_set_multi(struct adapter *adapter)
1798 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1800 struct ifmultiaddr *ifma;
1802 struct ifnet *ifp = adapter->ifp;
1804 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1806 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
/* NOTE(review): UPE|MPE is set unconditionally here before the flag test below — looks redundant/suspicious; confirm against intended filter policy */
1807 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1808 if (ifp->if_flags & IFF_PROMISC)
1809 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1810 else if (ifp->if_flags & IFF_ALLMULTI) {
1811 fctrl |= IXGBE_FCTRL_MPE;
1812 fctrl &= ~IXGBE_FCTRL_UPE;
1814 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1816 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Multicast list locking API differs before/after FreeBSD 8 */
1818 #if __FreeBSD_version < 800000
1821 if_maddr_rlock(ifp);
1823 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1824 if (ifma->ifma_addr->sa_family != AF_LINK)
1826 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1827 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1828 IXGBE_ETH_LENGTH_OF_ADDRESS);
1831 #if __FreeBSD_version < 800000
1832 IF_ADDR_UNLOCK(ifp);
1834 if_maddr_runlock(ifp);
1838 ixgbe_update_mc_addr_list(&adapter->hw,
1839 update_ptr, mcnt, ixgbe_mc_array_itr);
1845 * This is an iterator function now needed by the multicast
1846 * shared code. It simply feeds the shared code routine the
1847 * addresses in the array of ixgbe_set_multi() one by one.
/*
 * Iterator callback for ixgbe_update_mc_addr_list(): returns the
 * current address from the flat mta[] array and advances *update_ptr
 * by one MAC address length.
 */
1850 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1852 u8 *addr = *update_ptr;
1856 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1857 *update_ptr = newptr;
1862 /*********************************************************************
1865 * This routine checks for link status,updates statistics,
1866 * and runs the watchdog check.
1868 **********************************************************************/
/*
 * One-second callout (runs with the core lock held): probes pluggable
 * optics, refreshes link state and stats, and runs a per-ring TX
 * watchdog. On watchdog expiry the interface is logged, marked down,
 * and reinitialized.
 */
1871 ixgbe_local_timer(void *arg)
1873 struct adapter *adapter = arg;
1874 struct ifnet *ifp = adapter->ifp;
1875 device_t dev = adapter->dev;
1876 struct tx_ring *txr = adapter->tx_rings;
1878 mtx_assert(&adapter->core_mtx, MA_OWNED);
1880 /* Check for pluggable optics */
1881 if (adapter->sfp_probe)
1882 if (!ixgbe_sfp_probe(adapter))
1883 goto out; /* Nothing to do */
1885 ixgbe_update_link_status(adapter);
1886 ixgbe_update_stats_counters(adapter);
1889 if (ixgbe_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
1890 ixgbe_print_hw_stats(adapter);
1893 * If the interface has been paused
1894 * then don't do the watchdog check
/* TFCS_TXOFF set means we are flow-control paused: skip watchdog */
1896 if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
1899 ** Check for time since any descriptor was cleaned
1901 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1903 if (txr->watchdog_check == FALSE) {
1904 IXGBE_TX_UNLOCK(txr);
1907 if ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG)
1909 IXGBE_TX_UNLOCK(txr);
/* All rings healthy: rearm queue interrupts and reschedule ourselves */
1912 ixgbe_rearm_queues(adapter, adapter->que_mask);
1913 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Watchdog path: dump ring state, mark interface down, reinit */
1917 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1918 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
1919 IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
1920 IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
1921 device_printf(dev,"TX(%d) desc avail = %d,"
1922 "Next TX to Clean = %d\n",
1923 txr->me, txr->tx_avail, txr->next_to_clean);
1924 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1925 adapter->watchdog_events++;
1926 IXGBE_TX_UNLOCK(txr);
1927 ixgbe_init_locked(adapter);
1931 ** Note: this routine updates the OS on the link state
1932 ** the real check of the hardware only happens with
1933 ** a link interrupt.
/*
 * Push cached link state (adapter->link_up, set by the link
 * interrupt path) to the OS via if_link_state_change. On link down,
 * the per-ring watchdog checks are disarmed.
 */
1936 ixgbe_update_link_status(struct adapter *adapter)
1938 struct ifnet *ifp = adapter->ifp;
1939 struct tx_ring *txr = adapter->tx_rings;
1940 device_t dev = adapter->dev;
1943 if (adapter->link_up){
1944 if (adapter->link_active == FALSE) {
/* link_speed == 128 is the 10G speed encoding; anything else reports 1G */
1946 device_printf(dev,"Link is up %d Gbps %s \n",
1947 ((adapter->link_speed == 128)? 10:1),
1949 adapter->link_active = TRUE;
1950 if_link_state_change(ifp, LINK_STATE_UP);
1952 } else { /* Link down */
1953 if (adapter->link_active == TRUE) {
1955 device_printf(dev,"Link is Down\n");
1956 if_link_state_change(ifp, LINK_STATE_DOWN);
1957 adapter->link_active = FALSE;
1958 for (int i = 0; i < adapter->num_queues;
1960 txr->watchdog_check = FALSE;
1968 /*********************************************************************
1970 * This routine disables all traffic on the adapter by issuing a
1971 * global reset on the MAC and deallocates TX/RX buffers.
1973 **********************************************************************/
/*
 * Stop all traffic (core lock held): disable interrupts, mark the
 * interface down, reset and stop the MAC, cancel the timer callout,
 * and restore RAR[0] in case the user changed the LAA.
 */
1976 ixgbe_stop(void *arg)
1979 struct adapter *adapter = arg;
1982 mtx_assert(&adapter->core_mtx, MA_OWNED);
1984 INIT_DEBUGOUT("ixgbe_stop: begin\n");
1985 ixgbe_disable_intr(adapter);
1987 /* Tell the stack that the interface is no longer active */
1988 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1990 ixgbe_reset_hw(&adapter->hw);
/* Clear the stopped flag so ixgbe_stop_adapter performs the full stop */
1991 adapter->hw.adapter_stopped = FALSE;
1992 ixgbe_stop_adapter(&adapter->hw);
1993 callout_stop(&adapter->timer);
1995 /* reprogram the RAR[0] in case user changed it. */
1996 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2002 /*********************************************************************
2004 * Determine hardware revision.
2006 **********************************************************************/
/*
 * Cache PCI identification (vendor/device/revision/subsystem IDs)
 * from config space into the shared-code hw structure.
 */
2008 ixgbe_identify_hardware(struct adapter *adapter)
2010 device_t dev = adapter->dev;
2012 /* Save off the information about this board */
2013 adapter->hw.vendor_id = pci_get_vendor(dev);
2014 adapter->hw.device_id = pci_get_device(dev);
2015 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2016 adapter->hw.subsystem_vendor_id =
2017 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2018 adapter->hw.subsystem_device_id =
2019 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2024 /*********************************************************************
2026 * Setup the Legacy or MSI Interrupt handler
2028 **********************************************************************/
/*
 * Allocate and wire up the single Legacy/MSI interrupt: one IRQ
 * resource, a fast handler (ixgbe_legacy_irq), a per-queue taskqueue,
 * and the link/SFP/MSF (and fdir) tasklets on a separate taskqueue.
 */
2030 ixgbe_allocate_legacy(struct adapter *adapter)
2032 device_t dev = adapter->dev;
2033 struct ix_queue *que = adapter->queues;
2037 if (adapter->msix == 1)
2040 /* We allocate a single interrupt resource */
2041 adapter->res = bus_alloc_resource_any(dev,
2042 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2043 if (adapter->res == NULL) {
2044 device_printf(dev, "Unable to allocate bus resource: "
2050 * Try allocating a fast interrupt and the associated deferred
2051 * processing contexts.
2053 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2054 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2055 taskqueue_thread_enqueue, &que->tq);
2056 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2057 device_get_nameunit(adapter->dev));
2059 /* Tasklets for Link, SFP and Multispeed Fiber */
2060 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2061 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2062 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2064 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2066 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2067 taskqueue_thread_enqueue, &adapter->tq);
2068 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2069 device_get_nameunit(adapter->dev));
/* Register the interrupt; on failure free both taskqueues */
2071 if ((error = bus_setup_intr(dev, adapter->res,
2072 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2073 que, &adapter->tag)) != 0) {
2074 device_printf(dev, "Failed to register fast interrupt "
2075 "handler: %d\n", error);
2076 taskqueue_free(que->tq);
2077 taskqueue_free(adapter->tq);
2082 /* For simplicity in the handlers */
2083 adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
2089 /*********************************************************************
2091 * Setup MSIX Interrupt resources and handlers
2093 **********************************************************************/
/*
 * Allocate one MSIX vector per queue (handler ixgbe_msix_que, bound
 * to a CPU when multiqueue) plus a final vector for link/other
 * causes (ixgbe_msix_link), and create the associated taskqueues.
 */
2095 ixgbe_allocate_msix(struct adapter *adapter)
2097 device_t dev = adapter->dev;
2098 struct ix_queue *que = adapter->queues;
2099 int error, rid, vector = 0;
2101 for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
2103 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2104 RF_SHAREABLE | RF_ACTIVE);
2105 if (que->res == NULL) {
2106 device_printf(dev,"Unable to allocate"
2107 " bus resource: que interrupt [%d]\n", vector);
2110 /* Set the handler function */
2111 error = bus_setup_intr(dev, que->res,
2112 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2113 ixgbe_msix_que, que, &que->tag);
2116 device_printf(dev, "Failed to register QUE handler");
/* NOTE(review): (u64)(1 << que->msix) shifts in 32-bit int first — wrong for msix >= 31; should be (u64)1 << que->msix */
2120 adapter->que_mask |= (u64)(1 << que->msix);
2122 ** Bind the msix vector, and thus the
2123 ** ring to the corresponding cpu.
2125 if (adapter->num_queues > 1)
2126 bus_bind_intr(dev, que->res, i);
2128 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2129 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2130 taskqueue_thread_enqueue, &que->tq);
2131 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2132 device_get_nameunit(adapter->dev));
/* Now the dedicated link (other causes) vector */
2137 adapter->res = bus_alloc_resource_any(dev,
2138 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2139 if (!adapter->res) {
2140 device_printf(dev,"Unable to allocate"
2141 " bus resource: Link interrupt [%d]\n", rid);
2144 /* Set the link handler function */
2145 error = bus_setup_intr(dev, adapter->res,
2146 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2147 ixgbe_msix_link, adapter, &adapter->tag);
2149 adapter->res = NULL;
2150 device_printf(dev, "Failed to register LINK handler");
2153 adapter->linkvec = vector;
2154 /* Tasklets for Link, SFP and Multispeed Fiber */
2155 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2156 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2157 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2159 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2161 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2162 taskqueue_thread_enqueue, &adapter->tq);
2163 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2164 device_get_nameunit(adapter->dev));
2170 * Setup Either MSI/X or MSI
/*
 * Probe and configure MSIX (falling back to MSI): map the MSIX
 * table BAR (82598 vs 82599 BAR locations), count available
 * messages, size the queue count from mp_ncpus or the tunable, and
 * allocate vectors. Returns the number of vectors obtained.
 */
2173 ixgbe_setup_msix(struct adapter *adapter)
2175 device_t dev = adapter->dev;
2176 int rid, want, queues, msgs;
2178 /* Override by tuneable */
2179 if (ixgbe_enable_msix == 0)
2182 /* First try MSI/X */
2183 rid = PCIR_BAR(MSIX_82598_BAR);
2184 adapter->msix_mem = bus_alloc_resource_any(dev,
2185 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2186 if (!adapter->msix_mem) {
2187 rid += 4; /* 82599 maps in higher BAR */
2188 adapter->msix_mem = bus_alloc_resource_any(dev,
2189 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2191 if (!adapter->msix_mem) {
2192 /* May not be enabled */
2193 device_printf(adapter->dev,
2194 "Unable to map MSIX table \n");
2198 msgs = pci_msix_count(dev);
2199 if (msgs == 0) { /* system has msix disabled */
2200 bus_release_resource(dev, SYS_RES_MEMORY,
2201 rid, adapter->msix_mem);
2202 adapter->msix_mem = NULL;
2206 /* Figure out a reasonable auto config value */
2207 queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2209 if (ixgbe_num_queues != 0)
2210 queues = ixgbe_num_queues;
2213 ** Want one vector (RX/TX pair) per queue
2214 ** plus an additional for Link.
2220 device_printf(adapter->dev,
2221 "MSIX Configuration Problem, "
2222 "%d vectors but %d queues wanted!\n",
2224 return (0); /* Will go to Legacy setup */
2226 if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
2227 device_printf(adapter->dev,
2228 "Using MSIX interrupts with %d vectors\n", msgs);
2229 adapter->num_queues = queues;
/* MSIX failed or unavailable: fall back to a single MSI message */
2233 msgs = pci_msi_count(dev);
2234 if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
2235 device_printf(adapter->dev,"Using MSI interrupt\n");
/*
 * Map the device's register BAR, record bus space tag/handle for the
 * osdep register-access macros, set legacy single-queue defaults,
 * then run MSI/MSIX setup to learn the supported vector count.
 */
2241 ixgbe_allocate_pci_resources(struct adapter *adapter)
2244 device_t dev = adapter->dev;
2247 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2250 if (!(adapter->pci_mem)) {
2251 device_printf(dev,"Unable to allocate bus resource: memory\n");
2255 adapter->osdep.mem_bus_space_tag =
2256 rman_get_bustag(adapter->pci_mem);
2257 adapter->osdep.mem_bus_space_handle =
2258 rman_get_bushandle(adapter->pci_mem);
2259 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2261 /* Legacy defaults */
2262 adapter->num_queues = 1;
2263 adapter->hw.back = &adapter->osdep;
2266 ** Now setup MSI or MSI/X, should
2267 ** return us the number of supported
2268 ** vectors. (Will be 1 for MSI)
2270 adapter->msix = ixgbe_setup_msix(adapter);
/*
 * Release everything ixgbe_allocate_pci_resources and the interrupt
 * setup paths acquired: per-queue IRQs, the legacy/link IRQ, MSI
 * messages, the MSIX table BAR, and the register BAR. Safe to call
 * from a partially-failed attach (bails early if no IRQ resource).
 */
2275 ixgbe_free_pci_resources(struct adapter * adapter)
2277 struct ix_queue *que = adapter->queues;
2278 device_t dev = adapter->dev;
/* MSIX table BAR rid differs between 82598 and 82599 */
2281 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2282 memrid = PCIR_BAR(MSIX_82598_BAR);
2284 memrid = PCIR_BAR(MSIX_82599_BAR);
2287 ** There is a slight possibility of a failure mode
2288 ** in attach that will result in entering this function
2289 ** before interrupt resources have been initialized, and
2290 ** in that case we do not want to execute the loops below
2291 ** We can detect this reliably by the state of the adapter
2294 if (adapter->res == NULL)
2298 ** Release all msix queue resources:
2300 for (int i = 0; i < adapter->num_queues; i++, que++) {
2301 rid = que->msix + 1;
2302 if (que->tag != NULL) {
2303 bus_teardown_intr(dev, que->res, que->tag);
2306 if (que->res != NULL)
2307 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2311 /* Clean the Legacy or Link interrupt last */
2312 if (adapter->linkvec) /* we are doing MSIX */
2313 rid = adapter->linkvec + 1;
2315 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2317 if (adapter->tag != NULL) {
2318 bus_teardown_intr(dev, adapter->res, adapter->tag);
2319 adapter->tag = NULL;
2321 if (adapter->res != NULL)
2322 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2326 pci_release_msi(dev);
2328 if (adapter->msix_mem != NULL)
2329 bus_release_resource(dev, SYS_RES_MEMORY,
2330 memrid, adapter->msix_mem);
2332 if (adapter->pci_mem != NULL)
2333 bus_release_resource(dev, SYS_RES_MEMORY,
2334 PCIR_BAR(0), adapter->pci_mem);
2339 /*********************************************************************
2341 * Setup networking device structure and register an interface.
2343 **********************************************************************/
/*
 * Create and register the ifnet: method pointers, capabilities
 * (checksum offload, TSO4, VLAN, jumbo, LRO), multiqueue transmit on
 * FreeBSD 8+, and the supported ifmedia entries (1000baseT added only
 * for the copper 82598AT; auto-select is the default).
 */
2345 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2347 struct ixgbe_hw *hw = &adapter->hw;
2350 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2352 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2354 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
2355 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2356 ifp->if_mtu = ETHERMTU;
2357 ifp->if_baudrate = 1000000000;
2358 ifp->if_init = ixgbe_init;
2359 ifp->if_softc = adapter;
2360 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2361 ifp->if_ioctl = ixgbe_ioctl;
2362 ifp->if_start = ixgbe_start;
2363 #if __FreeBSD_version >= 800000
2364 ifp->if_transmit = ixgbe_mq_start;
2365 ifp->if_qflush = ixgbe_qflush;
2367 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
2369 ether_ifattach(ifp, adapter->hw.mac.addr);
2371 adapter->max_frame_size =
2372 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2375 * Tell the upper layer(s) we support long frames.
2377 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2379 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
2380 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2381 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;
2383 ifp->if_capenable = ifp->if_capabilities;
2386 * Specify the media types supported by this adapter and register
2387 * callbacks to update media and link information
2389 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2390 ixgbe_media_status);
2391 ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics |
2393 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2394 ifmedia_add(&adapter->media,
2395 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2396 ifmedia_add(&adapter->media,
2397 IFM_ETHER | IFM_1000_T, 0, NULL);
2399 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2400 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * ixgbe_config_link - resolve and program link settings.
 *
 * For SFP / multispeed-fiber PHYs the work is deferred to the
 * mod/msf taskqueue tasks; otherwise the link is checked and
 * (re)negotiated through the hw->mac.ops function pointers.
 * NOTE(review): lossy extraction -- braces and else paths dropped.
 */
2406 ixgbe_config_link(struct adapter *adapter)
2408 struct ixgbe_hw *hw = &adapter->hw;
2409 u32 autoneg, err = 0;
2410 bool sfp, negotiate;
2412 sfp = ixgbe_is_sfp(hw);
2415 if (hw->phy.multispeed_fiber) {
2416 hw->mac.ops.setup_sfp(hw);
/* Multispeed fiber: hand off to the multispeed-fiber task. */
2417 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
/* Other SFP modules: hand off to the module-insertion task. */
2419 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2421 if (hw->mac.ops.check_link)
2422 err = ixgbe_check_link(hw, &autoneg,
2423 &adapter->link_up, FALSE);
/* If nothing is advertised, fall back to the device's capabilities. */
2426 autoneg = hw->phy.autoneg_advertised;
2427 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2428 err = hw->mac.ops.get_link_capabilities(hw,
2429 &autoneg, &negotiate);
2432 if (hw->mac.ops.setup_link)
2433 err = hw->mac.ops.setup_link(hw, autoneg,
2434 negotiate, adapter->link_up);
2440 /********************************************************************
2441 * Manage DMA'able memory.
2442 *******************************************************************/
/*
 * bus_dmamap_load() callback: record the bus address of the single
 * mapped segment into the caller-supplied bus_addr_t (arg).
 * NOTE(review): the nseg/error guard present in the full driver was
 * dropped by extraction.
 */
2444 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
2448 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * ixgbe_dma_malloc - create a DMA tag, allocate and map 'size' bytes,
 * and load the map so dma->dma_paddr is filled in (via ixgbe_dmamap_cb).
 * On failure, resources acquired so far are unwound and the handles
 * cleared.  NOTE(review): lossy extraction -- error labels, returns
 * and some tag parameters were dropped.
 */
2453 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2454 struct ixgbe_dma_alloc *dma, int mapflags)
2456 device_t dev = adapter->dev;
/* Descriptor rings require DBA_ALIGN alignment. */
2459 r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2460 DBA_ALIGN, 0, /* alignment, bounds */
2461 BUS_SPACE_MAXADDR, /* lowaddr */
2462 BUS_SPACE_MAXADDR, /* highaddr */
2463 NULL, NULL, /* filter, filterarg */
2466 size, /* maxsegsize */
2467 BUS_DMA_ALLOCNOW, /* flags */
2468 NULL, /* lockfunc */
2469 NULL, /* lockfuncarg */
2472 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2476 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2477 BUS_DMA_NOWAIT, &dma->dma_map);
2479 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
/* Load the map; the callback stores the bus address. */
2483 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2487 mapflags | BUS_DMA_NOWAIT);
2489 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2493 dma->dma_size = size;
/* Failure unwind: free the memory, destroy the tag, clear handles. */
2496 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2498 bus_dma_tag_destroy(dma->dma_tag);
2500 dma->dma_map = NULL;
2501 dma->dma_tag = NULL;
/*
 * ixgbe_dma_free - tear down a region created by ixgbe_dma_malloc:
 * sync for CPU access, unload the map, free the memory, destroy the tag.
 */
2506 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2508 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2509 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2510 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2511 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2512 bus_dma_tag_destroy(dma->dma_tag);
2516 /*********************************************************************
2518 * Allocate memory for the transmit and receive rings, and then
2519 * the descriptors associated with each, called only once at attach.
2521 **********************************************************************/
2523 ixgbe_allocate_queues(struct adapter *adapter)
2525 device_t dev = adapter->dev;
2526 struct ix_queue *que;
2527 struct tx_ring *txr;
2528 struct rx_ring *rxr;
2529 int rsize, tsize, error = IXGBE_SUCCESS;
2530 int txconf = 0, rxconf = 0;
2532 /* First allocate the top level queue structs */
2533 if (!(adapter->queues =
2534 (struct ix_queue *) malloc(sizeof(struct ix_queue) *
2535 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2536 device_printf(dev, "Unable to allocate queue memory\n");
2541 /* First allocate the TX ring struct memory */
2542 if (!(adapter->tx_rings =
2543 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2544 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2545 device_printf(dev, "Unable to allocate TX ring memory\n");
2550 /* Next allocate the RX */
2551 if (!(adapter->rx_rings =
2552 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2553 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2554 device_printf(dev, "Unable to allocate RX ring memory\n");
2559 /* For the ring itself */
2560 tsize = roundup2(adapter->num_tx_desc *
2561 sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2564 * Now set up the TX queues, txconf is needed to handle the
2565 * possibility that things fail midcourse and we need to
2566 * undo memory gracefully
2568 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2569 /* Set up some basics */
2570 txr = &adapter->tx_rings[i];
2571 txr->adapter = adapter;
2574 /* Initialize the TX side lock */
2575 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2576 device_get_nameunit(dev), txr->me);
2577 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2579 if (ixgbe_dma_malloc(adapter, tsize,
2580 &txr->txdma, BUS_DMA_NOWAIT)) {
2582 "Unable to allocate TX Descriptor memory\n");
2586 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2587 bzero((void *)txr->tx_base, tsize);
2589 /* Now allocate transmit buffers for the ring */
2590 if (ixgbe_allocate_transmit_buffers(txr)) {
2592 "Critical Failure setting up transmit buffers\n");
2596 #if __FreeBSD_version >= 800000
2597 /* Allocate a buf ring */
2598 txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2599 M_WAITOK, &txr->tx_mtx);
2600 if (txr->br == NULL) {
2602 "Critical Failure setting up buf ring\n");
2610 * Next the RX queues...
2612 rsize = roundup2(adapter->num_rx_desc *
2613 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2614 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2615 rxr = &adapter->rx_rings[i];
2616 /* Set up some basics */
2617 rxr->adapter = adapter;
2620 /* Initialize the RX side lock */
2621 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2622 device_get_nameunit(dev), rxr->me);
2623 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2625 if (ixgbe_dma_malloc(adapter, rsize,
2626 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2628 "Unable to allocate RxDescriptor memory\n");
2632 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2633 bzero((void *)rxr->rx_base, rsize);
2635 /* Allocate receive buffers for the ring*/
2636 if (ixgbe_allocate_receive_buffers(rxr)) {
2638 "Critical Failure setting up receive buffers\n");
2645 ** Finally set up the queue holding structs
2647 for (int i = 0; i < adapter->num_queues; i++) {
2648 que = &adapter->queues[i];
2649 que->adapter = adapter;
2650 que->txr = &adapter->tx_rings[i];
2651 que->rxr = &adapter->rx_rings[i];
2657 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2658 ixgbe_dma_free(adapter, &rxr->rxdma);
2660 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2661 ixgbe_dma_free(adapter, &txr->txdma);
2662 free(adapter->rx_rings, M_DEVBUF);
2664 free(adapter->tx_rings, M_DEVBUF);
2666 free(adapter->queues, M_DEVBUF);
2671 /*********************************************************************
2673 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2674 * the information needed to transmit a packet on the wire. This is
2675 * called only once at attach, setup is done every reset.
2677 **********************************************************************/
/*
 * NOTE(review): lossy extraction -- braces, returns and the 'fail:'
 * label were dropped; only the surviving statements are annotated.
 */
2679 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2681 struct adapter *adapter = txr->adapter;
2682 device_t dev = adapter->dev;
2683 struct ixgbe_tx_buf *txbuf;
2687 * Setup DMA descriptor areas.
/* One tag for all TX packet maps: up to IXGBE_TSO_SIZE total,
 * ixgbe_num_segs segments of at most PAGE_SIZE each. */
2689 if ((error = bus_dma_tag_create(NULL, /* parent */
2690 1, 0, /* alignment, bounds */
2691 BUS_SPACE_MAXADDR, /* lowaddr */
2692 BUS_SPACE_MAXADDR, /* highaddr */
2693 NULL, NULL, /* filter, filterarg */
2694 IXGBE_TSO_SIZE, /* maxsize */
2695 ixgbe_num_segs, /* nsegments */
2696 PAGE_SIZE, /* maxsegsize */
2698 NULL, /* lockfunc */
2699 NULL, /* lockfuncarg */
2701 device_printf(dev,"Unable to allocate TX DMA tag\n");
2705 if (!(txr->tx_buffers =
2706 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
2707 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2708 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2713 /* Create the descriptor buffer dma maps */
2714 txbuf = txr->tx_buffers;
2715 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2716 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2718 device_printf(dev, "Unable to create TX DMA map\n");
2725 /* We free all, it handles case where we are in the middle */
2726 ixgbe_free_transmit_structures(adapter);
2730 /*********************************************************************
2732 * Initialize a transmit ring.
2734 **********************************************************************/
/*
 * Called at init/reset time: clears descriptors, frees any mbufs
 * left from before, and resets the ring's software state.
 * NOTE(review): the matching IXGBE_TX_LOCK() call appears to have been
 * dropped by extraction (only the UNLOCK at the end survives) -- the
 * full driver holds the TX lock across this routine; confirm upstream.
 */
2736 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2738 struct adapter *adapter = txr->adapter;
2739 struct ixgbe_tx_buf *txbuf;
2742 /* Clear the old ring contents */
2744 bzero((void *)txr->tx_base,
2745 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc;
2747 txr->next_avail_desc = 0;
2748 txr->next_to_clean = 0;
2750 /* Free any existing tx buffers. */
2751 txbuf = txr->tx_buffers;
2752 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2753 if (txbuf->m_head != NULL) {
2754 bus_dmamap_sync(txr->txtag, txbuf->map,
2755 BUS_DMASYNC_POSTWRITE);
2756 bus_dmamap_unload(txr->txtag, txbuf->map);
2757 m_freem(txbuf->m_head);
2758 txbuf->m_head = NULL;
2760 /* Clear the EOP index */
2761 txbuf->eop_index = -1;
2765 /* Set the rate at which we sample packets */
/* ATR (Flow Director) sampling only exists on 82599 hardware. */
2766 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
2767 txr->atr_sample = atr_sample_rate;
2770 /* Set number of descriptors available */
2771 txr->tx_avail = adapter->num_tx_desc;
2773 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2774 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2775 IXGBE_TX_UNLOCK(txr);
2778 /*********************************************************************
2780 * Initialize all transmit rings.
2782 **********************************************************************/
/* Walk every TX ring and (re)initialize it via ixgbe_setup_transmit_ring(). */
2784 ixgbe_setup_transmit_structures(struct adapter *adapter)
2786 struct tx_ring *txr = adapter->tx_rings;
2788 for (int i = 0; i < adapter->num_queues; i++, txr++)
2789 ixgbe_setup_transmit_ring(txr);
2794 /*********************************************************************
2796 * Enable transmit unit.
2798 **********************************************************************/
/*
 * Program the hardware TX registers for every ring: ring base/length,
 * head/tail reset, head-writeback disable, and (82599 only) global TX
 * DMA enable plus MTQC configuration with the arbiter parked.
 * NOTE(review): lossy extraction -- break statements and closing
 * braces in the switch blocks were dropped.
 */
2800 ixgbe_initialize_transmit_units(struct adapter *adapter)
2802 struct tx_ring *txr = adapter->tx_rings;
2803 struct ixgbe_hw *hw = &adapter->hw;
2805 /* Setup the Base and Length of the Tx Descriptor Ring */
2807 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2808 u64 tdba = txr->txdma.dma_paddr;
/* 64-bit ring base is split across TDBAL/TDBAH. */
2811 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2812 (tdba & 0x00000000ffffffffULL));
2813 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2814 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2815 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2817 /* Setup the HW Tx Head and Tail descriptor pointers */
2818 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2819 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2821 /* Setup Transmit Descriptor Cmd Settings */
2822 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2823 txr->watchdog_check = FALSE;
2825 /* Disable Head Writeback */
/* The DCA_TXCTRL register moved between 82598 and 82599. */
2826 switch (hw->mac.type) {
2827 case ixgbe_mac_82598EB:
2828 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2830 case ixgbe_mac_82599EB:
2832 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2835 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2836 switch (hw->mac.type) {
2837 case ixgbe_mac_82598EB:
2838 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2840 case ixgbe_mac_82599EB:
2842 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
/* 82599: enable TX DMA and set MTQC; MTQC may only be written
 * while the descriptor arbiter is disabled. */
2848 if (hw->mac.type == ixgbe_mac_82599EB) {
2849 u32 dmatxctl, rttdcs;
2850 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2851 dmatxctl |= IXGBE_DMATXCTL_TE;
2852 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2853 /* Disable arbiter to set MTQC */
2854 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2855 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2856 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2857 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2858 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2859 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2865 /*********************************************************************
2867 * Free all transmit rings.
2869 **********************************************************************/
/*
 * Release per-ring buffers, descriptor DMA memory and locks, then the
 * tx_rings array itself.  NOTE(review): the matching IXGBE_TX_LOCK()
 * before the per-ring teardown appears dropped by extraction.
 */
2871 ixgbe_free_transmit_structures(struct adapter *adapter)
2873 struct tx_ring *txr = adapter->tx_rings;
2875 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2877 ixgbe_free_transmit_buffers(txr);
2878 ixgbe_dma_free(adapter, &txr->txdma);
2879 IXGBE_TX_UNLOCK(txr);
2880 IXGBE_TX_LOCK_DESTROY(txr);
2882 free(adapter->tx_rings, M_DEVBUF);
2885 /*********************************************************************
2887 * Free transmit ring related data structures.
2889 **********************************************************************/
/*
 * Frees every tx_buffer's mbuf and DMA map, the FreeBSD 8+ buf_ring,
 * the tx_buffers array, and the TX DMA tag.  Safe to call when only
 * partially allocated (every pointer is NULL-checked before use).
 */
2891 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2893 struct adapter *adapter = txr->adapter;
2894 struct ixgbe_tx_buf *tx_buffer;
2897 INIT_DEBUGOUT("free_transmit_ring: begin");
2899 if (txr->tx_buffers == NULL)
2902 tx_buffer = txr->tx_buffers;
2903 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2904 if (tx_buffer->m_head != NULL) {
/* In-flight mbuf: sync/unload before freeing it and its map. */
2905 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2906 BUS_DMASYNC_POSTWRITE);
2907 bus_dmamap_unload(txr->txtag,
2909 m_freem(tx_buffer->m_head);
2910 tx_buffer->m_head = NULL;
2911 if (tx_buffer->map != NULL) {
2912 bus_dmamap_destroy(txr->txtag,
2914 tx_buffer->map = NULL;
/* Map exists but no mbuf: still unload and destroy it. */
2916 } else if (tx_buffer->map != NULL) {
2917 bus_dmamap_unload(txr->txtag,
2919 bus_dmamap_destroy(txr->txtag,
2921 tx_buffer->map = NULL;
2924 #if __FreeBSD_version >= 800000
2925 if (txr->br != NULL)
2926 buf_ring_free(txr->br, M_DEVBUF);
2928 if (txr->tx_buffers != NULL) {
2929 free(txr->tx_buffers, M_DEVBUF);
2930 txr->tx_buffers = NULL;
2932 if (txr->txtag != NULL) {
2933 bus_dma_tag_destroy(txr->txtag);
2939 /*********************************************************************
2941 * Advanced Context Descriptor setup for VLAN or CSUM
2943 **********************************************************************/
/*
 * Builds an advanced context descriptor at next_avail_desc describing
 * the VLAN tag and/or L3/L4 checksum-offload parameters of 'mp', then
 * advances the ring state.  NOTE(review): lossy extraction -- returns,
 * some switch labels and closing braces were dropped.
 */
2946 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2948 struct adapter *adapter = txr->adapter;
2949 struct ixgbe_adv_tx_context_desc *TXD;
2950 struct ixgbe_tx_buf *tx_buffer;
2951 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2952 struct ether_vlan_header *eh;
2954 struct ip6_hdr *ip6;
2955 int ehdrlen, ip_hlen = 0;
2958 bool offload = TRUE;
2959 int ctxd = txr->next_avail_desc;
/* Nothing to offload for this packet. */
2963 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
2966 tx_buffer = &txr->tx_buffers[ctxd];
2967 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2970 ** In advanced descriptors the vlan tag must
2971 ** be placed into the descriptor itself.
2973 if (mp->m_flags & M_VLANTAG) {
2974 vtag = htole16(mp->m_pkthdr.ether_vtag);
2975 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2976 } else if (offload == FALSE)
2980 * Determine where frame payload starts.
2981 * Jump over vlan headers if already present,
2982 * helpful for QinQ too.
2984 eh = mtod(mp, struct ether_vlan_header *);
2985 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2986 etype = ntohs(eh->evl_proto);
2987 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2989 etype = ntohs(eh->evl_encap_proto);
2990 ehdrlen = ETHER_HDR_LEN;
2993 /* Set the ether header length */
2994 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
/* L3: note IP/IPv6 header length and type (switch on etype;
 * the ETHERTYPE_IP case label was dropped by extraction). */
2998 ip = (struct ip *)(mp->m_data + ehdrlen);
2999 ip_hlen = ip->ip_hl << 2;
3000 if (mp->m_len < ehdrlen + ip_hlen)
3003 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3005 case ETHERTYPE_IPV6:
3006 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3007 ip_hlen = sizeof(struct ip6_hdr);
3008 if (mp->m_len < ehdrlen + ip_hlen)
3010 ipproto = ip6->ip6_nxt;
3011 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
3018 vlan_macip_lens |= ip_hlen;
3019 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
/* L4: pick the offload type matching the csum flags requested. */
3023 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
3024 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3028 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
3029 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
/* SCTP checksum offload only exists on FreeBSD 8+. */
3032 #if __FreeBSD_version >= 800000
3034 if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
3035 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3043 /* Now copy bits into descriptor */
3044 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3045 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3046 TXD->seqnum_seed = htole32(0);
3047 TXD->mss_l4len_idx = htole32(0);
3049 tx_buffer->m_head = NULL;
3050 tx_buffer->eop_index = -1;
3052 /* We've consumed the first desc, adjust counters */
3053 if (++ctxd == adapter->num_tx_desc)
3055 txr->next_avail_desc = ctxd;
3061 /**********************************************************************
3063 * Setup work for hardware segmentation offload (TSO) on
3064 * adapters using advanced tx descriptors
3066 **********************************************************************/
/*
 * Builds the TSO context descriptor for an IPv4/TCP packet: seeds the
 * TCP pseudo-header checksum, fills VLAN/MACLEN/IPLEN, MSS and L4LEN
 * fields, and returns the payload length through *paylen.
 * NOTE(review): lossy extraction -- returns and braces dropped.
 */
3068 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
3070 struct adapter *adapter = txr->adapter;
3071 struct ixgbe_adv_tx_context_desc *TXD;
3072 struct ixgbe_tx_buf *tx_buffer;
3073 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3074 u32 mss_l4len_idx = 0;
3076 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
3077 struct ether_vlan_header *eh;
3083 * Determine where frame payload starts.
3084 * Jump over vlan headers if already present
3086 eh = mtod(mp, struct ether_vlan_header *);
3087 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
3088 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3090 ehdrlen = ETHER_HDR_LEN;
3092 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3093 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3096 ctxd = txr->next_avail_desc;
3097 tx_buffer = &txr->tx_buffers[ctxd];
3098 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
/* TSO here is IPv4/TCP only; bail on anything else. */
3100 ip = (struct ip *)(mp->m_data + ehdrlen);
3101 if (ip->ip_p != IPPROTO_TCP)
3102 return FALSE; /* 0 */
3104 ip_hlen = ip->ip_hl << 2;
3105 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
/* Seed the TCP pseudo-header checksum; hardware finishes it. */
3106 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3107 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3108 tcp_hlen = th->th_off << 2;
3109 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
3111 /* This is used in the transmit desc in encap */
3112 *paylen = mp->m_pkthdr.len - hdrlen;
3114 /* VLAN MACLEN IPLEN */
3115 if (mp->m_flags & M_VLANTAG) {
3116 vtag = htole16(mp->m_pkthdr.ether_vtag);
3117 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3120 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3121 vlan_macip_lens |= ip_hlen;
3122 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3124 /* ADV DTYPE TUCMD */
3125 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3126 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3127 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3128 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
/* MSS and L4 header length for the segmentation engine. */
3132 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
3133 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
3134 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3136 TXD->seqnum_seed = htole32(0);
3137 tx_buffer->m_head = NULL;
3138 tx_buffer->eop_index = -1;
/* Consume the context descriptor, wrapping the ring index. */
3140 if (++ctxd == adapter->num_tx_desc)
3144 txr->next_avail_desc = ctxd;
3150 ** This routine parses packet headers so that Flow
3151 ** Director can make a hashed filter table entry
3152 ** allowing traffic flows to be identified and kept
3153 ** on the same cpu. This would be a performance
3154 ** hit, but we only do it at IXGBE_FDIR_RATE of
/*
 * Parses an outgoing IPv4 TCP/UDP frame and installs an 82599 Flow
 * Director signature filter bound to this queue's MSI-X vector.
 * NOTE(review): lossy extraction -- braces, breaks and the line that
 * initializes 'flex_bytes' appear to have been dropped; confirm
 * against the upstream driver before relying on its value.
 */
3158 ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
3160 struct adapter *adapter = txr->adapter;
3161 struct ix_queue *que;
3162 struct ixgbe_atr_input atr_input;
3166 struct ether_vlan_header *eh;
3167 int ehdrlen, ip_hlen;
3168 u16 etype, vlan_id, src_port, dst_port, flex_bytes;
3169 u32 src_ipv4_addr, dst_ipv4_addr;
3170 u8 l4type = 0, ipproto = 0;
3172 eh = mtod(mp, struct ether_vlan_header *);
3173 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
3174 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3176 ehdrlen = ETHER_HDR_LEN;
3177 etype = ntohs(eh->evl_proto);
3179 /* Only handling IPv4 */
3180 if (etype != ETHERTYPE_IP)
3183 ip = (struct ip *)(mp->m_data + ehdrlen);
3185 ip_hlen = ip->ip_hl << 2;
3186 src_port = dst_port = 0;
3188 /* check if we're UDP or TCP */
/* switch on the IP protocol; case labels dropped by extraction */
3191 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
3192 src_port = th->th_sport;
3193 dst_port = th->th_dport;
3194 l4type |= IXGBE_ATR_L4TYPE_TCP;
3197 uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
3198 src_port = uh->uh_sport;
3199 dst_port = uh->uh_dport;
3200 l4type |= IXGBE_ATR_L4TYPE_UDP;
3206 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
3208 vlan_id = htole16(mp->m_pkthdr.ether_vtag);
3209 src_ipv4_addr = ip->ip_src.s_addr;
3210 dst_ipv4_addr = ip->ip_dst.s_addr;
3212 que = &adapter->queues[txr->me];
3214 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
/* ports are deliberately swapped -- match the receive direction */
3215 ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
3216 ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
3217 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
3218 ixgbe_atr_set_l4type_82599(&atr_input, l4type);
3219 /* src and dst are inverted, think how the receiver sees them */
3220 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
3221 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
3223 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
3224 ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
3225 &atr_input, que->msix);
3229 /**********************************************************************
3231 * Examine each tx_buffer in the used queue. If the hardware is done
3232 * processing the packet then free associated resources. The
3233 * tx_buffer is put back on the free queue.
3235 **********************************************************************/
/*
 * TX completion: walks descriptors from next_to_clean, and for each
 * packet whose EOP descriptor has the DD (descriptor-done) bit set,
 * frees the mbuf chain and resets the descriptors.  Must be called
 * with the TX lock held (asserted below).  NOTE(review): lossy
 * extraction -- braces, returns and some statements dropped.
 */
3237 ixgbe_txeof(struct tx_ring *txr)
3239 struct adapter *adapter = txr->adapter;
3240 struct ifnet *ifp = adapter->ifp;
3241 u32 first, last, done;
3242 struct ixgbe_tx_buf *tx_buffer;
3243 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
3245 mtx_assert(&txr->tx_mtx, MA_OWNED);
/* Ring already empty: nothing to clean. */
3247 if (txr->tx_avail == adapter->num_tx_desc)
3250 first = txr->next_to_clean;
3251 tx_buffer = &txr->tx_buffers[first];
3252 /* For cleanup we just use legacy struct */
3253 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3254 last = tx_buffer->eop_index;
3257 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3260 ** Get the index of the first descriptor
3261 ** BEYOND the EOP and call that 'done'.
3262 ** I do this so the comparison in the
3263 ** inner while loop below can be simple
3265 if (++last == adapter->num_tx_desc) last = 0;
3268 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3269 BUS_DMASYNC_POSTREAD);
3271 ** Only the EOP descriptor of a packet now has the DD
3272 ** bit set, this is what we look for...
3274 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
3275 /* We clean the range of the packet */
3276 while (first != done) {
3277 tx_desc->upper.data = 0;
3278 tx_desc->lower.data = 0;
3279 tx_desc->buffer_addr = 0;
3282 if (tx_buffer->m_head) {
3284 tx_buffer->m_head->m_pkthdr.len;
3285 bus_dmamap_sync(txr->txtag,
3287 BUS_DMASYNC_POSTWRITE);
3288 bus_dmamap_unload(txr->txtag,
3290 m_freem(tx_buffer->m_head);
3291 tx_buffer->m_head = NULL;
3292 tx_buffer->map = NULL;
3294 tx_buffer->eop_index = -1;
/* Record activity for the TX watchdog. */
3295 txr->watchdog_time = ticks;
3297 if (++first == adapter->num_tx_desc)
3300 tx_buffer = &txr->tx_buffers[first];
3302 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3306 /* See if there is more work now */
3307 last = tx_buffer->eop_index;
3310 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3311 /* Get next done point */
3312 if (++last == adapter->num_tx_desc) last = 0;
3317 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3318 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3320 txr->next_to_clean = first;
3323 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
3324 * it is OK to send packets. If there are no pending descriptors,
3325 * clear the timeout. Otherwise, if some descriptors have been freed,
3326 * restart the timeout.
3328 if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
3329 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3330 if (txr->tx_avail == adapter->num_tx_desc) {
3331 txr->watchdog_check = FALSE;
3339 /*********************************************************************
3341 * Refresh mbuf buffers for RX descriptor rings
3342 * - now keeps its own state so discards due to resource
3343 * exhaustion are unnecessary, if an mbuf cannot be obtained
3344 * it just returns, keeping its placeholder, thus it can simply
3345 * be recalled to try again.
3347 **********************************************************************/
/*
 * Walks the ring from next_to_refresh up to 'limit', replacing any
 * missing header (if header-split) and payload mbufs, then bumps the
 * hardware tail (RDT) if anything was refreshed.
 * NOTE(review): lossy extraction -- braces, gotos and the statement
 * updating 'cleaned' were dropped.
 */
3349 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
3351 struct adapter *adapter = rxr->adapter;
3352 bus_dma_segment_t hseg[1];
3353 bus_dma_segment_t pseg[1];
3354 struct ixgbe_rx_buf *rxbuf;
3355 struct mbuf *mh, *mp;
3356 int i, nsegs, error, cleaned;
3358 i = rxr->next_to_refresh;
3359 cleaned = -1; /* Signify no completions */
3360 while (i != limit) {
3361 rxbuf = &rxr->rx_buffers[i];
/* Header buffer is only used when header split is enabled. */
3362 if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
3363 mh = m_gethdr(M_DONTWAIT, MT_DATA);
3366 mh->m_pkthdr.len = mh->m_len = MHLEN;
3368 mh->m_flags |= M_PKTHDR;
/* ETHER_ALIGN keeps the IP header 32-bit aligned. */
3369 m_adj(mh, ETHER_ALIGN);
3370 /* Get the memory mapping */
3371 error = bus_dmamap_load_mbuf_sg(rxr->htag,
3372 rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
3374 printf("GET BUF: dmamap load"
3375 " failure - %d\n", error);
3380 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3381 BUS_DMASYNC_PREREAD);
3382 rxr->rx_base[i].read.hdr_addr =
3383 htole64(hseg[0].ds_addr);
/* Payload buffer: jumbo cluster sized to rx_mbuf_sz. */
3386 if (rxbuf->m_pack == NULL) {
3387 mp = m_getjcl(M_DONTWAIT, MT_DATA,
3388 M_PKTHDR, adapter->rx_mbuf_sz);
3391 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
3392 /* Get the memory mapping */
3393 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
3394 rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
3396 printf("GET BUF: dmamap load"
3397 " failure - %d\n", error);
3402 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3403 BUS_DMASYNC_PREREAD);
3404 rxr->rx_base[i].read.pkt_addr =
3405 htole64(pseg[0].ds_addr);
3409 /* Calculate next index */
3410 if (++i == adapter->num_rx_desc)
3412 /* This is the work marker for refresh */
3413 rxr->next_to_refresh = i;
3416 if (cleaned != -1) /* If we refreshed some, bump tail */
3417 IXGBE_WRITE_REG(&adapter->hw,
3418 IXGBE_RDT(rxr->me), cleaned);
3422 /*********************************************************************
3424 * Allocate memory for rx_buffer structures. Since we use one
3425 * rx_buffer per received packet, the maximum number of rx_buffer's
3426 * that we'll need is equal to the number of receive descriptors
3427 * that we've allocated.
3429 **********************************************************************/
/*
 * Creates the rx_buffers array plus two DMA tags -- a small (MSIZE)
 * header tag and a jumbo-page payload tag -- and one map of each per
 * descriptor.  On any failure, ixgbe_free_receive_structures() unwinds.
 * NOTE(review): lossy extraction -- returns, 'fail:' label and some
 * tag parameters dropped.
 */
3431 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
3433 struct adapter *adapter = rxr->adapter;
3434 device_t dev = adapter->dev;
3435 struct ixgbe_rx_buf *rxbuf;
3436 int i, bsize, error;
3438 bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
3439 if (!(rxr->rx_buffers =
3440 (struct ixgbe_rx_buf *) malloc(bsize,
3441 M_DEVBUF, M_NOWAIT | M_ZERO))) {
3442 device_printf(dev, "Unable to allocate rx_buffer memory\n");
/* Header tag: MSIZE buffers for split packet headers. */
3447 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3448 1, 0, /* alignment, bounds */
3449 BUS_SPACE_MAXADDR, /* lowaddr */
3450 BUS_SPACE_MAXADDR, /* highaddr */
3451 NULL, NULL, /* filter, filterarg */
3452 MSIZE, /* maxsize */
3454 MSIZE, /* maxsegsize */
3456 NULL, /* lockfunc */
3457 NULL, /* lockfuncarg */
3459 device_printf(dev, "Unable to create RX DMA tag\n");
/* Payload tag: jumbo-page clusters for packet data. */
3463 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3464 1, 0, /* alignment, bounds */
3465 BUS_SPACE_MAXADDR, /* lowaddr */
3466 BUS_SPACE_MAXADDR, /* highaddr */
3467 NULL, NULL, /* filter, filterarg */
3468 MJUMPAGESIZE, /* maxsize */
3470 MJUMPAGESIZE, /* maxsegsize */
3472 NULL, /* lockfunc */
3473 NULL, /* lockfuncarg */
3475 device_printf(dev, "Unable to create RX DMA tag\n");
/* One header map and one payload map per descriptor. */
3479 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
3480 rxbuf = &rxr->rx_buffers[i];
3481 error = bus_dmamap_create(rxr->htag,
3482 BUS_DMA_NOWAIT, &rxbuf->hmap);
3484 device_printf(dev, "Unable to create RX head map\n");
3487 error = bus_dmamap_create(rxr->ptag,
3488 BUS_DMA_NOWAIT, &rxbuf->pmap);
3490 device_printf(dev, "Unable to create RX pkt map\n");
3498 /* Frees all, but can handle partial completion */
3499 ixgbe_free_receive_structures(adapter);
3504 ** Used to detect a descriptor that has
3505 ** been merged by Hardware RSC.
/* Extract the RSC descriptor count from the writeback lo_dword. */
3508 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
3510 return (le32toh(rx->wb.lower.lo_dword.data) &
3511 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
3514 /*********************************************************************
3516 * Initialize Hardware RSC (LRO) feature on 82599
3517 * for an RX ring, this is toggled by the LRO capability
3518 * even though it is transparent to the stack.
3520 **********************************************************************/
3522 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
3524 struct adapter *adapter = rxr->adapter;
3525 struct ixgbe_hw *hw = &adapter->hw;
3526 u32 rscctrl, rdrxctl;
/* Global RX DMA control: CRC strip and RSC ACK coalescing. */
3528 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3529 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3530 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3531 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
3532 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
/* Enable RSC on this specific ring. */
3534 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
3535 rscctrl |= IXGBE_RSCCTL_RSCEN;
3537 ** Limit the total number of descriptors that
3538 ** can be combined, so it does not exceed 64K
3540 if (adapter->rx_mbuf_sz == MCLBYTES)
3541 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3542 else /* using 4K clusters */
3543 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
3544 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
3546 /* Enable TCP header recognition */
3547 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
3548 (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
3549 IXGBE_PSRTYPE_TCPHDR));
3551 /* Disable RSC for ACK packets */
3552 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3553 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
/*
 * ixgbe_free_receive_ring - release every header and payload mbuf
 * held by this ring's rx_buffers (sync, unload, free, clear pointers).
 * The descriptors themselves and the buffer array are left intact.
 */
3560 ixgbe_free_receive_ring(struct rx_ring *rxr)
3562 struct adapter *adapter;
3563 struct ixgbe_rx_buf *rxbuf;
3566 adapter = rxr->adapter;
3567 for (i = 0; i < adapter->num_rx_desc; i++) {
3568 rxbuf = &rxr->rx_buffers[i];
3569 if (rxbuf->m_head != NULL) {
3570 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3571 BUS_DMASYNC_POSTREAD);
3572 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
/* Restore M_PKTHDR so m_freem() treats it as a packet header mbuf. */
3573 rxbuf->m_head->m_flags |= M_PKTHDR;
3574 m_freem(rxbuf->m_head);
3576 if (rxbuf->m_pack != NULL) {
3577 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3578 BUS_DMASYNC_POSTREAD);
3579 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3580 rxbuf->m_pack->m_flags |= M_PKTHDR;
3581 m_freem(rxbuf->m_pack);
3583 rxbuf->m_head = NULL;
3584 rxbuf->m_pack = NULL;
3589 /*********************************************************************
3591 * Initialize a receive ring and its buffers.
3593 **********************************************************************/
/*
** Initialize one RX ring: zero the descriptor area, free any stale
** buffers, pre-load header/payload mbufs (header mbufs only when
** header split is enabled), reset the soft indices, and set up LRO
** (hardware RSC on 82599, software tcp_lro otherwise).
** NOTE(review): several lines (locals 'ifp'/'dev', braces, error
** labels) are absent from this extraction; code kept byte-identical.
*/
3595 ixgbe_setup_receive_ring(struct rx_ring *rxr)
3597 struct adapter *adapter;
3600 struct ixgbe_rx_buf *rxbuf;
3601 bus_dma_segment_t pseg[1], hseg[1];
3602 struct lro_ctrl *lro = &rxr->lro;
3603 int rsize, nsegs, error = 0;
3605 adapter = rxr->adapter;
3609 /* Clear the ring contents */
3611 rsize = roundup2(adapter->num_rx_desc *
3612 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
3613 bzero((void *)rxr->rx_base, rsize);
3615 /* Free current RX buffer structs and their mbufs */
3616 ixgbe_free_receive_ring(rxr);
3618 /* Configure header split? */
3619 if (ixgbe_header_split)
3620 rxr->hdr_split = TRUE;
3622 /* Now replenish the mbufs */
3623 for (int j = 0; j != adapter->num_rx_desc; ++j) {
3624 struct mbuf *mh, *mp;
3626 rxbuf = &rxr->rx_buffers[j];
3628 ** Don't allocate header mbufs if not
3629 ** doing header split, it's wasteful
3631 if (rxr->hdr_split == FALSE)
3634 /* First the header */
3635 rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
3636 if (rxbuf->m_head == NULL)
/* ETHER_ALIGN keeps the IP header 32-bit aligned after the 14-byte MAC header */
3638 m_adj(rxbuf->m_head, ETHER_ALIGN);
3640 mh->m_len = mh->m_pkthdr.len = MHLEN;
3641 mh->m_flags |= M_PKTHDR;
3642 /* Get the memory mapping */
3643 error = bus_dmamap_load_mbuf_sg(rxr->htag,
3644 rxbuf->hmap, rxbuf->m_head, hseg,
3645 &nsegs, BUS_DMA_NOWAIT);
3646 if (error != 0) /* Nothing elegant to do here */
3648 bus_dmamap_sync(rxr->htag,
3649 rxbuf->hmap, BUS_DMASYNC_PREREAD);
3650 /* Update descriptor */
3651 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
3654 /* Now the payload cluster */
3655 rxbuf->m_pack = m_getjcl(M_DONTWAIT, MT_DATA,
3656 M_PKTHDR, adapter->rx_mbuf_sz);
3657 if (rxbuf->m_pack == NULL)
3660 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
3661 /* Get the memory mapping */
3662 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
3663 rxbuf->pmap, mp, pseg,
3664 &nsegs, BUS_DMA_NOWAIT);
3667 bus_dmamap_sync(rxr->ptag,
3668 rxbuf->pmap, BUS_DMASYNC_PREREAD);
3669 /* Update descriptor */
3670 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
3674 /* Setup our descriptor indices */
3675 rxr->next_to_check = 0;
3676 rxr->next_to_refresh = 0;
3677 rxr->lro_enabled = FALSE;
3678 rxr->rx_split_packets = 0;
/* Push the freshly written descriptors out to the hardware */
3681 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3682 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3685 ** Now set up the LRO interface:
3686 ** 82598 uses software LRO, the
3687 ** 82599 uses a hardware assist.
/* HW RSC needs RX checksum offload enabled as well as LRO */
3689 if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
3690 (ifp->if_capenable & IFCAP_RXCSUM) &&
3691 (ifp->if_capenable & IFCAP_LRO))
3692 ixgbe_setup_hw_rsc(rxr);
3693 else if (ifp->if_capenable & IFCAP_LRO) {
3694 int err = tcp_lro_init(lro);
3696 device_printf(dev, "LRO Initialization failed!\n");
3699 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
3700 rxr->lro_enabled = TRUE;
3701 lro->ifp = adapter->ifp;
3704 IXGBE_RX_UNLOCK(rxr);
/* Failure path: release whatever was allocated, drop the lock */
3708 ixgbe_free_receive_ring(rxr);
3709 IXGBE_RX_UNLOCK(rxr);
3713 /*********************************************************************
3715 * Initialize all receive rings.
3717 **********************************************************************/
/*
** Initialize every RX ring of the adapter; on the first failure,
** tear down only the rings that were already set up (ring 'j' failed
** and cleaned up after itself).
** NOTE(review): return statements and braces are absent from this
** extraction; code kept byte-identical.
*/
3719 ixgbe_setup_receive_structures(struct adapter *adapter)
3721 struct rx_ring *rxr = adapter->rx_rings;
3724 for (j = 0; j < adapter->num_queues; j++, rxr++)
3725 if (ixgbe_setup_receive_ring(rxr))
3731 * Free RX buffers allocated so far, we will only handle
3732 * the rings that completed, the failing case will have
3733 * cleaned up for itself. 'j' failed, so it's the terminus.
3735 for (int i = 0; i < j; ++i) {
3736 rxr = &adapter->rx_rings[i];
3737 ixgbe_free_receive_ring(rxr);
3743 /*********************************************************************
3745 * Setup receive registers and features.
3747 **********************************************************************/
3748 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
/*
** Program the hardware RX units: disable RX while configuring,
** enable broadcast/pause filtering, set jumbo mode and buffer sizes,
** write per-ring base/length/head/tail and SRRCTL, initialize PSRTYPE
** on 82599, and configure RSS (redirection table, hash keys, MRQC)
** when more than one queue is in use.
** NOTE(review): several lines (loop variable declarations, the RETA
** write condition, closing braces) are absent from this extraction;
** code kept byte-identical.
*/
3751 ixgbe_initialize_receive_units(struct adapter *adapter)
3753 struct rx_ring *rxr = adapter->rx_rings;
3754 struct ixgbe_hw *hw = &adapter->hw;
3755 struct ifnet *ifp = adapter->ifp;
3756 u32 bufsz, rxctrl, fctrl, srrctl, rxcsum;
3757 u32 reta, mrqc = 0, hlreg, random[10];
3761 * Make sure receives are disabled while
3762 * setting up the descriptor ring
3764 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3765 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
3766 rxctrl & ~IXGBE_RXCTRL_RXEN);
3768 /* Enable broadcasts */
3769 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3770 fctrl |= IXGBE_FCTRL_BAM;
3771 fctrl |= IXGBE_FCTRL_DPF;
3772 fctrl |= IXGBE_FCTRL_PMCF;
3773 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3775 /* Set for Jumbo Frames? */
3776 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3777 if (ifp->if_mtu > ETHERMTU) {
3778 hlreg |= IXGBE_HLREG0_JUMBOEN;
3779 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3781 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3782 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3784 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3786 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3787 u64 rdba = rxr->rxdma.dma_paddr;
3789 /* Setup the Base and Length of the Rx Descriptor Ring */
3790 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
3791 (rdba & 0x00000000ffffffffULL));
3792 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
3793 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
3794 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3796 /* Set up the SRRCTL register */
3797 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
3798 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3799 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3801 if (rxr->hdr_split) {
3802 /* Use a standard mbuf for the header */
3803 srrctl |= ((IXGBE_RX_HDR <<
3804 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3805 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3806 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3808 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3809 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
3811 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3812 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
3813 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
3816 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
3817 /* PSRTYPE must be initialized in 82599 */
3818 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3819 IXGBE_PSRTYPE_UDPHDR |
3820 IXGBE_PSRTYPE_IPV4HDR |
3821 IXGBE_PSRTYPE_IPV6HDR;
3822 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3825 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
/* Multi-queue: enable RSS */
3828 if (adapter->num_queues > 1) {
3832 /* set up random bits */
3833 arc4rand(&random, sizeof(random), 0);
3835 /* Set up the redirection table */
/* 128 one-byte entries, packed four at a time into each RETA register */
3836 for (i = 0, j = 0; i < 128; i++, j++) {
3837 if (j == adapter->num_queues) j = 0;
3838 reta = (reta << 8) | (j * 0x11);
3840 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3843 /* Now fill our hash function seeds */
3844 for (int i = 0; i < 10; i++)
3845 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
3847 /* Perform hash on these packet types */
3848 mrqc = IXGBE_MRQC_RSSEN
3849 | IXGBE_MRQC_RSS_FIELD_IPV4
3850 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3851 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
3852 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3853 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3854 | IXGBE_MRQC_RSS_FIELD_IPV6
3855 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3856 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
3857 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3858 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3860 /* RSS and RX IPP Checksum are mutually exclusive */
3861 rxcsum |= IXGBE_RXCSUM_PCSD;
3864 if (ifp->if_capenable & IFCAP_RXCSUM)
3865 rxcsum |= IXGBE_RXCSUM_PCSD;
3867 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3868 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3870 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3875 /*********************************************************************
3877 * Free all receive rings.
3879 **********************************************************************/
/*
** Tear down all RX rings: free the per-ring buffers, LRO state and
** descriptor DMA memory, then free the ring array itself.
** NOTE(review): the tcp_lro_free() call and braces are absent from
** this extraction; code kept byte-identical.
*/
3881 ixgbe_free_receive_structures(struct adapter *adapter)
3883 struct rx_ring *rxr = adapter->rx_rings;
3885 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3886 struct lro_ctrl *lro = &rxr->lro;
3887 ixgbe_free_receive_buffers(rxr);
3888 /* Free LRO memory */
3890 /* Free the ring memory as well */
3891 ixgbe_dma_free(adapter, &rxr->rxdma);
3894 free(adapter->rx_rings, M_DEVBUF);
3898 /*********************************************************************
3900 * Free receive ring data structures
3902 **********************************************************************/
/*
** Release everything attached to one RX ring's buffer array: mbufs,
** DMA maps, the buffer array itself, and finally the header/payload
** DMA tags.  Unlike ixgbe_free_receive_ring(), this also destroys
** the maps and tags, so the ring cannot be reused afterwards.
** NOTE(review): map/tag NULL-resets and braces are absent from this
** extraction; code kept byte-identical.
*/
3904 ixgbe_free_receive_buffers(struct rx_ring *rxr)
3906 struct adapter *adapter = rxr->adapter;
3907 struct ixgbe_rx_buf *rxbuf;
3909 INIT_DEBUGOUT("free_receive_structures: begin");
3911 /* Cleanup any existing buffers */
3912 if (rxr->rx_buffers != NULL) {
3913 for (int i = 0; i < adapter->num_rx_desc; i++) {
3914 rxbuf = &rxr->rx_buffers[i];
3915 if (rxbuf->m_head != NULL) {
3916 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3917 BUS_DMASYNC_POSTREAD);
3918 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
/* Force M_PKTHDR so m_freem() frees the whole chain */
3919 rxbuf->m_head->m_flags |= M_PKTHDR;
3920 m_freem(rxbuf->m_head);
3922 if (rxbuf->m_pack != NULL) {
3923 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3924 BUS_DMASYNC_POSTREAD);
3925 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3926 rxbuf->m_pack->m_flags |= M_PKTHDR;
3927 m_freem(rxbuf->m_pack);
3929 rxbuf->m_head = NULL;
3930 rxbuf->m_pack = NULL;
3931 if (rxbuf->hmap != NULL) {
3932 bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
3935 if (rxbuf->pmap != NULL) {
3936 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
3940 if (rxr->rx_buffers != NULL) {
3941 free(rxr->rx_buffers, M_DEVBUF);
3942 rxr->rx_buffers = NULL;
3946 if (rxr->htag != NULL) {
3947 bus_dma_tag_destroy(rxr->htag);
3950 if (rxr->ptag != NULL) {
3951 bus_dma_tag_destroy(rxr->ptag);
/*
** Hand a completed RX packet to the stack, attempting software LRO
** first when the packet qualifies (IPv4/TCP, hardware-verified TCP
** checksum, no ETQF match).  Falls through to if_input() when LRO is
** disabled, has no resources, or the enqueue fails.
*/
static __inline void
ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
3963 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
3964 * should be computed by hardware. Also it should not have VLAN tag in
3967 if (rxr->lro_enabled &&
3968 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
3969 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3970 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
3971 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
3972 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3973 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
3975 * Send to the stack if:
3976 ** - LRO not enabled, or
3977 ** - no LRO resources, or
3978 ** - lro enqueue fails
3980 if (rxr->lro.lro_cnt != 0)
3981 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
3984 (*ifp->if_input)(ifp, m);
/*
** Discard descriptor 'i' of a bad/partial frame.  The loaded DMA maps
** and mbufs are kept and recycled in place; only the mbuf lengths and
** data pointers are reset so the buffers can be reposted.
** NOTE(review): the lines freeing a partial chain (rbuf->fmp) and the
** mh/mp assignments are absent from this extraction; code kept
** byte-identical.
*/
static __inline void
ixgbe_rx_discard(struct rx_ring *rxr, int i)
3990 struct adapter *adapter = rxr->adapter;
3991 struct ixgbe_rx_buf *rbuf;
3992 struct mbuf *mh, *mp;
3994 rbuf = &rxr->rx_buffers[i];
3995 if (rbuf->fmp != NULL) /* Partial chain ? */
4001 /* Reuse loaded DMA map and just update mbuf chain */
4003 mh->m_flags |= M_PKTHDR;
4006 mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
/* Rewind the data pointer to the start of the external cluster */
4007 mp->m_data = mp->m_ext.ext_buf;
4013 /*********************************************************************
4015 * This routine executes in interrupt context. It replenishes
4016 * the mbufs in the descriptor and sends data which has been
4017 * dma'ed into host memory to upper layer.
4019 * We loop at most count times if count is > 0, or until done if
4022 * Return TRUE for more work, FALSE for all clean.
4023 *********************************************************************/
/*
** RX completion processing (interrupt context).  Walks the descriptor
** ring from next_to_check: discards error frames, reassembles
** header-split and HW-RSC (82599 hardware LRO) chains via per-buffer
** fmp pointers, stamps checksum/VLAN/flowid metadata, and passes
** completed frames to the stack (or software LRO).  Mbufs are
** replenished every 8 processed descriptors.  Returns TRUE when more
** descriptors remain ready (caller should reschedule), FALSE when clean.
** NOTE(review): numerous lines (lock acquisition, discard branch
** details, fmp bookkeeping, return statements) are absent from this
** extraction; code kept byte-identical.
*/
4025 ixgbe_rxeof(struct ix_queue *que, int count)
4027 struct adapter *adapter = que->adapter;
4028 struct rx_ring *rxr = que->rxr;
4029 struct ifnet *ifp = adapter->ifp;
4030 struct lro_ctrl *lro = &rxr->lro;
4031 struct lro_entry *queued;
4032 int i, nextp, processed = 0;
4034 union ixgbe_adv_rx_desc *cur;
4035 struct ixgbe_rx_buf *rbuf, *nbuf;
4039 /* Sync the ring. */
4040 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4041 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4043 for (i = rxr->next_to_check; count != 0;) {
4044 struct mbuf *sendmp, *mh, *mp;
4046 u16 hlen, plen, hdr, vtag;
4049 cur = &rxr->rx_base[i];
4050 staterr = le32toh(cur->wb.upper.status_error);
/* DD clear: descriptor not yet written back by hardware */
4052 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
4054 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
4061 cur->wb.upper.status_error = 0;
4062 rbuf = &rxr->rx_buffers[i];
4066 plen = le16toh(cur->wb.upper.length);
4067 ptype = le32toh(cur->wb.lower.lo_dword.data) &
4068 IXGBE_RXDADV_PKTTYPE_MASK;
4069 hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
4070 vtag = le16toh(cur->wb.upper.vlan);
4071 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
4073 /* Make sure all parts of a bad packet are discarded */
4074 if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
4077 rxr->rx_discarded++;
4079 rxr->discard = TRUE;
4081 rxr->discard = FALSE;
4082 ixgbe_rx_discard(rxr, i);
4087 ** On 82599 which supports a hardware
4088 ** LRO (called HW RSC), packets need
4089 ** not be fragmented across sequential
4090 ** descriptors, rather the next descriptor
4091 ** is indicated in bits of the descriptor.
4092 ** This also means that we might process
4093 ** more than one packet at a time, something
4094 ** that has never been true before, it
4095 ** required eliminating global chain pointers
4096 ** in favor of what we are doing here. -jfv
4100 ** Figure out the next descriptor
4103 if (rxr->hw_rsc == TRUE) {
4104 rsc = ixgbe_rsc_count(cur);
4105 rxr->rsc_num += (rsc - 1);
4107 if (rsc) { /* Get hardware index */
4109 IXGBE_RXDADV_NEXTP_MASK) >>
4110 IXGBE_RXDADV_NEXTP_SHIFT);
4111 } else { /* Just sequential */
4113 if (nextp == adapter->num_rx_desc)
4116 nbuf = &rxr->rx_buffers[nextp];
4120 ** The header mbuf is ONLY used when header
4121 ** split is enabled, otherwise we get normal
4122 ** behavior, ie, both header and payload
4123 ** are DMA'd into the payload buffer.
4125 ** Rather than using the fmp/lmp global pointers
4126 ** we now keep the head of a packet chain in the
4127 ** buffer struct and pass this along from one
4128 ** descriptor to the next, until we get EOP.
4130 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
4131 /* This must be an initial descriptor */
4132 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
4133 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
4134 if (hlen > IXGBE_RX_HDR)
4135 hlen = IXGBE_RX_HDR;
4137 mh->m_flags |= M_PKTHDR;
4139 mh->m_pkthdr.len = mh->m_len;
4140 /* Null buf pointer so it is refreshed */
4141 rbuf->m_head = NULL;
4143 ** Check the payload length, this
4144 ** could be zero if its a small
4150 mp->m_flags &= ~M_PKTHDR;
4152 mh->m_pkthdr.len += mp->m_len;
4153 /* Null buf pointer so it is refreshed */
4154 rbuf->m_pack = NULL;
4155 rxr->rx_split_packets++;
4158 ** Now create the forward
4159 ** chain so when complete
4163 /* stash the chain head */
4165 /* Make forward chain */
4167 mp->m_next = nbuf->m_pack;
4169 mh->m_next = nbuf->m_pack;
4171 /* Singlet, prepare to send */
4173 if (staterr & IXGBE_RXD_STAT_VP) {
4174 sendmp->m_pkthdr.ether_vtag = vtag;
4175 sendmp->m_flags |= M_VLANTAG;
4180 ** Either no header split, or a
4181 ** secondary piece of a fragmented
4186 ** See if there is a stored head
4187 ** that determines what we are
4190 rbuf->m_pack = rbuf->fmp = NULL;
4192 if (sendmp != NULL) /* secondary frag */
4193 sendmp->m_pkthdr.len += mp->m_len;
4195 /* first desc of a non-ps chain */
4197 sendmp->m_flags |= M_PKTHDR;
4198 sendmp->m_pkthdr.len = mp->m_len;
4199 if (staterr & IXGBE_RXD_STAT_VP) {
4200 sendmp->m_pkthdr.ether_vtag = vtag;
4201 sendmp->m_flags |= M_VLANTAG;
4204 /* Pass the head pointer on */
4208 mp->m_next = nbuf->m_pack;
4212 /* Sending this frame? */
4214 sendmp->m_pkthdr.rcvif = ifp;
4217 /* capture data for AIM */
4218 rxr->bytes += sendmp->m_pkthdr.len;
4219 rxr->rx_bytes += sendmp->m_pkthdr.len;
4220 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
4221 ixgbe_rx_checksum(staterr, sendmp, ptype);
4222 #if __FreeBSD_version >= 800000
4223 sendmp->m_pkthdr.flowid = que->msix;
4224 sendmp->m_flags |= M_FLOWID;
4228 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4229 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4231 /* Advance our pointers to the next descriptor. */
4232 if (++i == adapter->num_rx_desc)
4235 /* Now send to the stack or do LRO */
4237 ixgbe_rx_input(rxr, ifp, sendmp, ptype);
4239 /* Every 8 descriptors we go to refresh mbufs */
4240 if (processed == 8) {
4241 ixgbe_refresh_mbufs(rxr, i);
4246 /* Refresh any remaining buf structs */
4247 if (processed != 0) {
4248 ixgbe_refresh_mbufs(rxr, i);
4252 rxr->next_to_check = i;
4255 * Flush any outstanding LRO work
4257 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
4258 SLIST_REMOVE_HEAD(&lro->lro_active, next);
4259 tcp_lro_flush(lro, queued);
4262 IXGBE_RX_UNLOCK(rxr);
4265 ** We still have cleaning to do?
4266 ** Schedule another interrupt if so.
4268 if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
/*
** NOTE(review): (u64)(1 << que->msix) shifts a plain int before
** widening; undefined/truncated for msix >= 31.  Should be
** (u64)(1ULL << que->msix) — confirm against upstream fix.
*/
4269 ixgbe_rearm_queues(adapter, (u64)(1 << que->msix));
4277 /*********************************************************************
4279 * Verify that the hardware indicated that the checksum is valid.
4280 * Inform the stack about the status of checksum so that stack
4281 * doesn't spend time verifying the checksum.
4283 *********************************************************************/
/*
** Translate the hardware checksum status/error bits of an RX
** descriptor into mbuf csum_flags so the stack can skip software
** verification.  SCTP packets get CSUM_SCTP_VALID on FreeBSD 8+.
** NOTE(review): the 'sctp' local declaration and closing braces are
** absent from this extraction; code kept byte-identical.
*/
4285 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
4287 u16 status = (u16) staterr;
/* Error bits live in byte 3 of the status/error word */
4288 u8 errors = (u8) (staterr >> 24);
4291 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
4292 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
4295 if (status & IXGBE_RXD_STAT_IPCS) {
4296 if (!(errors & IXGBE_RXD_ERR_IPE)) {
4297 /* IP Checksum Good */
4298 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4299 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4302 mp->m_pkthdr.csum_flags = 0;
4304 if (status & IXGBE_RXD_STAT_L4CS) {
4305 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4306 #if __FreeBSD_version >= 800000
4308 type = CSUM_SCTP_VALID;
4310 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
4311 mp->m_pkthdr.csum_flags |= type;
/* Fully validated by hardware */
4313 mp->m_pkthdr.csum_data = htons(0xffff);
4321 ** This routine is run via an vlan config EVENT,
4322 ** it enables us to use the HW Filter table since
4323 ** we can get the vlan id. This just creates the
4324 ** entry in the soft version of the VFTA, init will
4325 ** repopulate the real table.
/*
** VLAN config event handler: record the new VLAN id in the soft
** shadow VFTA (init repopulates the hardware table) and re-init.
** NOTE(review): the 'index'/'bit' declarations and the bit
** computation line are absent from this extraction; code kept
** byte-identical.
*/
4328 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4330 struct adapter *adapter = ifp->if_softc;
4333 if (ifp->if_softc != arg) /* Not our event */
4336 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
/* VFTA is an array of 32-bit words: word = vtag >> 5, bit = vtag & 0x1F */
4339 index = (vtag >> 5) & 0x7F;
4341 ixgbe_shadow_vfta[index] |= (1 << bit);
4342 ++adapter->num_vlans;
4343 /* Re-init to load the changes */
4344 ixgbe_init(adapter);
4348 ** This routine is run via an vlan
4349 ** unconfig EVENT, remove our entry
4350 ** in the soft vfta.
/*
** VLAN unconfig event handler: clear the VLAN id's bit from the soft
** shadow VFTA and re-init so the hardware table is rebuilt without it.
** NOTE(review): the 'index'/'bit' declarations and the bit
** computation line are absent from this extraction; code kept
** byte-identical.
*/
4353 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4355 struct adapter *adapter = ifp->if_softc;
4358 if (ifp->if_softc != arg)
4361 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4364 index = (vtag >> 5) & 0x7F;
4366 ixgbe_shadow_vfta[index] &= ~(1 << bit);
4367 --adapter->num_vlans;
4368 /* Re-init to load the changes */
4369 ixgbe_init(adapter);
/*
** After a soft reset, repopulate the hardware VFTA from the shadow
** copy and enable VLAN filtering.  VLAN tag stripping (VME) is a
** global VLNCTRL bit on 82598 but per-queue in RXDCTL on 82599.
*/
4373 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
4375 struct ixgbe_hw *hw = &adapter->hw;
4380 ** We get here thru init_locked, meaning
4381 ** a soft reset, this has already cleared
4382 ** the VFTA and other state, so if there
4383 ** have been no vlan's registered do nothing.
4385 if (adapter->num_vlans == 0)
4389 ** A soft reset zero's out the VFTA, so
4390 ** we need to repopulate it now.
4392 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
4393 if (ixgbe_shadow_vfta[i] != 0)
4394 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
4395 ixgbe_shadow_vfta[i]);
4397 /* Enable the Filter Table */
4398 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4399 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
4400 ctrl |= IXGBE_VLNCTRL_VFE;
4401 if (hw->mac.type == ixgbe_mac_82598EB)
4402 ctrl |= IXGBE_VLNCTRL_VME;
4403 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
4405 /* On 82599 the VLAN enable is per/queue in RXDCTL */
4406 if (hw->mac.type == ixgbe_mac_82599EB)
4407 for (int i = 0; i < adapter->num_queues; i++) {
4408 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
4409 ctrl |= IXGBE_RXDCTL_VME;
4410 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
/*
** Enable device interrupts: build the EIMS mask (fan-failure GPI on
** 82598AT, ECC/SDP/flow-director bits on 82599), set up auto-clear
** for MSIX (link causes excluded), then enable each queue vector.
*/
4415 ixgbe_enable_intr(struct adapter *adapter)
4417 struct ixgbe_hw *hw = &adapter->hw;
4418 struct ix_queue *que = adapter->queues;
4419 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
4422 /* Enable Fan Failure detection */
4423 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4424 mask |= IXGBE_EIMS_GPI_SDP1;
4426 /* 82599 specific interrupts */
4427 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4428 mask |= IXGBE_EIMS_ECC;
4429 mask |= IXGBE_EIMS_GPI_SDP1;
4430 mask |= IXGBE_EIMS_GPI_SDP2;
4432 mask |= IXGBE_EIMS_FLOW_DIR;
4436 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4438 /* With RSS we use auto clear */
4439 if (adapter->msix_mem) {
4440 mask = IXGBE_EIMS_ENABLE_MASK;
4441 /* Dont autoclear Link */
4442 mask &= ~IXGBE_EIMS_OTHER;
4443 mask &= ~IXGBE_EIMS_LSC;
4444 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4448 ** Now enable all queues, this is done separately to
4449 ** allow for handling the extended (beyond 32) MSIX
4450 ** vectors that can be used by 82599
4452 for (int i = 0; i < adapter->num_queues; i++, que++)
4453 ixgbe_enable_queue(adapter, que->msix);
4455 IXGBE_WRITE_FLUSH(hw);
/*
** Mask all device interrupts.  82598 has a single 32-bit EIMC; 82599
** additionally needs the extended EIMC_EX registers for vectors
** beyond 32.  Auto-clear is disabled first when MSIX is in use.
*/
4461 ixgbe_disable_intr(struct adapter *adapter)
4463 if (adapter->msix_mem)
4464 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4465 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
4466 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
4468 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
4469 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
4470 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
4472 IXGBE_WRITE_FLUSH(&adapter->hw);
/*
** Read a 16-bit PCI config-space word via the osdep back-pointer.
** NOTE(review): the 'value' declaration, remaining arguments and the
** return statement are absent from this extraction; code kept
** byte-identical.
*/
4477 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
4481 value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
/*
** Write a 16-bit PCI config-space word via the osdep back-pointer.
** NOTE(review): the trailing arguments of the call are absent from
** this extraction; code kept byte-identical.
*/
4488 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
4490 pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
4497 ** Setup the correct IVAR register for a particular MSIX interrupt
4498 ** (yes this is all very magic and confusing :)
4499 ** - entry is the register array entry
4500 ** - vector is the MSIX vector for this queue
4501 ** - type is RX/TX/MISC
/*
** Program one IVAR entry to route an interrupt cause to an MSIX
** vector.  type -1 means a miscellaneous (non-queue) cause; the
** register layout differs between 82598 (4 entries per IVAR, type
** folded into the entry index) and 82599 (separate IVAR_MISC, 2
** entries per IVAR with RX/TX halves).
*/
4504 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4506 struct ixgbe_hw *hw = &adapter->hw;
/* Hardware requires the valid bit set alongside the vector number */
4509 vector |= IXGBE_IVAR_ALLOC_VAL;
4511 switch (hw->mac.type) {
4513 case ixgbe_mac_82598EB:
4515 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4517 entry += (type * 64);
4518 index = (entry >> 2) & 0x1F;
4519 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4520 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4521 ivar |= (vector << (8 * (entry & 0x3)));
4522 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4525 case ixgbe_mac_82599EB:
4526 if (type == -1) { /* MISC IVAR */
4527 index = (entry & 1) * 8;
4528 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4529 ivar &= ~(0xFF << index);
4530 ivar |= (vector << index);
4531 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4532 } else { /* RX/TX IVARS */
4533 index = (16 * (entry & 1)) + (8 * type);
4534 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4535 ivar &= ~(0xFF << index);
4536 ivar |= (vector << index);
4537 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
/*
** Route each queue's RX (type 0) and TX (type 1) causes to its MSIX
** vector, seed the per-vector EITR moderation value, and map the
** link interrupt to its dedicated vector.
*/
4546 ixgbe_configure_ivars(struct adapter *adapter)
4548 struct ix_queue *que = adapter->queues;
4550 for (int i = 0; i < adapter->num_queues; i++, que++) {
4551 /* First the RX queue entry */
4552 ixgbe_set_ivar(adapter, i, que->msix, 0);
4553 /* ... and the TX */
4554 ixgbe_set_ivar(adapter, i, que->msix, 1);
4555 /* Set an Initial EITR value */
4556 IXGBE_WRITE_REG(&adapter->hw,
4557 IXGBE_EITR(que->msix), IXGBE_LOW_LATENCY);
4560 /* For the Link interrupt */
4561 ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
4565 ** ixgbe_sfp_probe - called in the local timer to
4566 ** determine if a port had optics inserted.
/*
** Called from the local timer to detect late insertion of SFP+
** optics on NL-PHY ports.  Returns TRUE once a supported module is
** identified and reset; clears sfp_probe (stop polling) on either
** success or an unsupported module.
** NOTE(review): intermediate 'ret' checks and return statements are
** absent from this extraction; code kept byte-identical.
*/
static bool ixgbe_sfp_probe(struct adapter *adapter)
4570 struct ixgbe_hw *hw = &adapter->hw;
4571 device_t dev = adapter->dev;
4572 bool result = FALSE;
4574 if ((hw->phy.type == ixgbe_phy_nl) &&
4575 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4576 s32 ret = hw->phy.ops.identify_sfp(hw);
4579 ret = hw->phy.ops.reset(hw);
4580 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4581 device_printf(dev,"Unsupported SFP+ module detected!");
4582 printf(" Reload driver with supported module.\n");
4583 adapter->sfp_probe = FALSE;
4586 device_printf(dev,"SFP+ module detected!\n");
4587 /* We now have supported optics */
4588 adapter->sfp_probe = FALSE;
4596 ** Tasklet handler for MSIX Link interrupts
4597 ** - do outside interrupt since it might sleep
/*
** Taskqueue handler for the MSIX link interrupt: query current link
** state and propagate it to the ifnet.  Runs outside interrupt
** context because the check may sleep.
*/
4600 ixgbe_handle_link(void *context, int pending)
4602 struct adapter *adapter = context;
4604 ixgbe_check_link(&adapter->hw,
4605 &adapter->link_speed, &adapter->link_up, 0);
4606 ixgbe_update_link_status(adapter);
4610 ** Tasklet for handling SFP module interrupts
/*
** Taskqueue handler for SFP module-insertion interrupts: identify
** the module, run the MAC's SFP setup, and on success chain into the
** multispeed-fiber task to renegotiate the link.
** NOTE(review): the early-return lines after each error report are
** absent from this extraction; code kept byte-identical.
*/
4613 ixgbe_handle_mod(void *context, int pending)
4615 struct adapter *adapter = context;
4616 struct ixgbe_hw *hw = &adapter->hw;
4617 device_t dev = adapter->dev;
4620 err = hw->phy.ops.identify_sfp(hw);
4621 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4623 "Unsupported SFP+ module type was detected.\n");
4626 err = hw->mac.ops.setup_sfp(hw);
4627 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4629 "Setup failure - unsupported SFP+ module type.\n");
4632 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
4638 ** Tasklet for handling MSF (multispeed fiber) interrupts
/*
** Taskqueue handler for multispeed-fiber (MSF) interrupts: determine
** the advertised speeds (falling back to the PHY's capabilities),
** restart link setup, then refresh link status.
** NOTE(review): the 'autoneg'/'negotiate'/'speed' declarations are
** absent from this extraction; code kept byte-identical.
*/
4641 ixgbe_handle_msf(void *context, int pending)
4643 struct adapter *adapter = context;
4644 struct ixgbe_hw *hw = &adapter->hw;
4648 autoneg = hw->phy.autoneg_advertised;
4649 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4650 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4651 if (hw->mac.ops.setup_link)
4652 hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
4654 ixgbe_check_link(&adapter->hw, &speed, &adapter->link_up, 0);
4655 ixgbe_update_link_status(adapter);
4662 ** Tasklet for reinitializing the Flow Director filter table
/*
** Taskqueue handler that rebuilds the 82599 Flow Director filter
** table after the hardware signalled it needs reinitializing, then
** marks the interface running again.
*/
4665 ixgbe_reinit_fdir(void *context, int pending)
4667 struct adapter *adapter = context;
4668 struct ifnet *ifp = adapter->ifp;
4670 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
4672 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
4673 adapter->fdir_reinit = 0;
4674 /* Restart the interface */
4675 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4680 /**********************************************************************
4682 * Update the board statistics counters.
4684 **********************************************************************/
/*
** Harvest the hardware statistics registers into the adapter's soft
** counters and fill in the ifnet statistics.  Most registers are
** clear-on-read, hence the accumulate-with-+= pattern.  Includes two
** documented hardware workarounds: GPRC counts missed packets, and
** MPRC incorrectly counts broadcasts.
** NOTE(review): the 'mp' declaration and the missed_rx accumulation
** line are absent from this extraction; code kept byte-identical.
*/
4686 ixgbe_update_stats_counters(struct adapter *adapter)
4688 struct ifnet *ifp = adapter->ifp;
4689 struct ixgbe_hw *hw = &adapter->hw;
4690 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4691 u64 total_missed_rx = 0;
4693 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4695 for (int i = 0; i < 8; i++) {
4697 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
4698 /* missed_rx tallies misses for the gprc workaround */
4700 /* global total per queue */
4701 adapter->stats.mpc[i] += mp;
4702 /* Running comprehensive total for stats display */
4703 total_missed_rx += adapter->stats.mpc[i];
4704 if (hw->mac.type == ixgbe_mac_82598EB)
4705 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
4708 /* Hardware workaround, gprc counts missed packets */
4709 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4710 adapter->stats.gprc -= missed_rx;
/* 82599 keeps 36-bit octet counts in L/H pairs; reading H clears the pair */
4712 if (hw->mac.type == ixgbe_mac_82599EB) {
4713 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
4714 IXGBE_READ_REG(hw, IXGBE_GORCH); /* clears register */
4715 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
4716 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* clears register */
4717 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
4718 IXGBE_READ_REG(hw, IXGBE_TORH); /* clears register */
4719 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4720 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4722 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4723 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4724 /* 82598 only has a counter in the high register */
4725 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4726 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4727 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4731 * Workaround: mprc hardware is incorrectly counting
4732 * broadcasts, so for now we subtract those.
4734 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4735 adapter->stats.bprc += bprc;
4736 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4737 adapter->stats.mprc -= bprc;
4739 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4740 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4741 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4742 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4743 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4744 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4745 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4746 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4748 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4749 adapter->stats.lxontxc += lxon;
4750 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4751 adapter->stats.lxofftxc += lxoff;
4752 total = lxon + lxoff;
/* Pause frames are counted as TX packets; back them out */
4754 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4755 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4756 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4757 adapter->stats.gptc -= total;
4758 adapter->stats.mptc -= total;
4759 adapter->stats.ptc64 -= total;
4760 adapter->stats.gotc -= total * ETHER_MIN_LEN;
4762 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4763 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4764 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4765 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4766 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4767 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4768 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4769 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4770 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4771 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4774 /* Fill out the OS statistics structure */
4775 ifp->if_ipackets = adapter->stats.gprc;
4776 ifp->if_opackets = adapter->stats.gptc;
4777 ifp->if_ibytes = adapter->stats.gorc;
4778 ifp->if_obytes = adapter->stats.gotc;
4779 ifp->if_imcasts = adapter->stats.mprc;
4780 ifp->if_collisions = 0;
4783 ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
4784 adapter->stats.rlec;
4788 /**********************************************************************
4790 * This routine is called only when ixgbe_display_debug_stats is enabled.
4791 * This routine provides a way to take a look at important statistics
4792 * maintained by the driver and hardware.
4794 **********************************************************************/
/*
** Dump the accumulated hardware statistics to the console.  Only
** called when ixgbe_display_debug_stats is enabled (via sysctl).
** NOTE(review): the final printf lines (TSO count value, tail of the
** function) are absent from this extraction; code kept byte-identical.
*/
4796 ixgbe_print_hw_stats(struct adapter * adapter)
4798 device_t dev = adapter->dev;
4801 device_printf(dev,"Std Mbuf Failed = %lu\n",
4802 adapter->mbuf_defrag_failed);
4803 device_printf(dev,"Missed Packets = %llu\n",
4804 (long long)adapter->stats.mpc[0]);
4805 device_printf(dev,"Receive length errors = %llu\n",
4806 ((long long)adapter->stats.roc +
4807 (long long)adapter->stats.ruc));
4808 device_printf(dev,"Crc errors = %llu\n",
4809 (long long)adapter->stats.crcerrs);
4810 device_printf(dev,"Driver dropped packets = %lu\n",
4811 adapter->dropped_pkts);
4812 device_printf(dev, "watchdog timeouts = %ld\n",
4813 adapter->watchdog_events);
4815 device_printf(dev,"XON Rcvd = %llu\n",
4816 (long long)adapter->stats.lxonrxc);
4817 device_printf(dev,"XON Xmtd = %llu\n",
4818 (long long)adapter->stats.lxontxc);
4819 device_printf(dev,"XOFF Rcvd = %llu\n",
4820 (long long)adapter->stats.lxoffrxc);
4821 device_printf(dev,"XOFF Xmtd = %llu\n",
4822 (long long)adapter->stats.lxofftxc);
4824 device_printf(dev,"Total Packets Rcvd = %llu\n",
4825 (long long)adapter->stats.tpr);
4826 device_printf(dev,"Good Packets Rcvd = %llu\n",
4827 (long long)adapter->stats.gprc);
4828 device_printf(dev,"Good Packets Xmtd = %llu\n",
4829 (long long)adapter->stats.gptc);
4830 device_printf(dev,"TSO Transmissions = %lu\n",
4836 /**********************************************************************
4838 * This routine is called only when em_display_debug_stats is enabled.
4839 * This routine provides a way to take a look at important statistics
4840 * maintained by the driver and hardware.
4842 **********************************************************************/
4844 ixgbe_print_debug_info(struct adapter *adapter)
4846 device_t dev = adapter->dev;
4847 struct ixgbe_hw *hw = &adapter->hw;
4848 struct ix_queue *que = adapter->queues;
4849 struct rx_ring *rxr;
4850 struct tx_ring *txr;
4851 struct lro_ctrl *lro;
/* Error Byte Count, read straight from the ERRBC register */
4853 device_printf(dev,"Error Byte Count = %u \n",
4854 IXGBE_READ_REG(hw, IXGBE_ERRBC));
/*
 * Walk every queue pair and dump per-queue state: MSI-X interrupt
 * counts, hardware RX/TX ring head/tail pointers, and the software
 * packet/byte/LRO counters kept in the ring structures.
 * NOTE(review): rxr, txr and lro are dereferenced below but their
 * per-iteration assignments (presumably from 'que') are in lines
 * elided from this excerpt — confirm they are set at loop top.
 */
4856 for (int i = 0; i < adapter->num_queues; i++, que++) {
4860 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
4861 que->msix, (long)que->irqs);
/* Hardware descriptor ring positions (head/tail registers) */
4862 device_printf(dev,"RX[%d]: rdh = %d, hw rdt = %d\n",
4863 i, IXGBE_READ_REG(hw, IXGBE_RDH(i)),
4864 IXGBE_READ_REG(hw, IXGBE_RDT(i)));
4865 device_printf(dev,"TX[%d] tdh = %d, hw tdt = %d\n", i,
4866 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
4867 IXGBE_READ_REG(hw, IXGBE_TDT(i)));
/* Software-maintained RX ring counters */
4868 device_printf(dev,"RX(%d) Packets Received: %lld\n",
4869 rxr->me, (long long)rxr->rx_packets);
4870 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
4871 rxr->me, (long long)rxr->rx_split_packets);
4872 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
4873 rxr->me, (long)rxr->rx_bytes);
/* Software LRO statistics for this ring */
4874 device_printf(dev,"RX(%d) LRO Queued= %d\n",
4875 rxr->me, lro->lro_queued);
4876 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
4877 rxr->me, lro->lro_flushed);
/* Hardware RSC (receive-side coalescing) merge count */
4878 device_printf(dev,"RX(%d) HW LRO Merges= %lu\n",
4879 rxr->me, (long)rxr->rsc_num);
/* Software-maintained TX ring counters */
4880 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
4881 txr->me, (long)txr->total_packets);
4882 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
4883 txr->me, (long)txr->no_desc_avail);
/* Link (non-queue) interrupt count */
4886 device_printf(dev,"Link IRQ Handled: %lu\n",
4887 (long)adapter->link_irq);
/*
 * Sysctl handler: any write to the stats node triggers a dump of the
 * hardware statistics via ixgbe_print_hw_stats().
 */
4892 ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS)
4896 struct adapter *adapter;
/* Let the sysctl framework copy the user-supplied integer in/out */
4899 error = sysctl_handle_int(oidp, &result, 0, req);
/* On error, or on a read (no new value supplied), do nothing
 * (the early-return body is in lines elided from this excerpt) */
4901 if (error || !req->newptr)
/* arg1 was registered as the adapter softc pointer */
4905 adapter = (struct adapter *) arg1;
4906 ixgbe_print_hw_stats(adapter);
/*
 * Sysctl handler: any write to the debug node triggers a dump of the
 * per-queue debug information via ixgbe_print_debug_info().
 */
4912 ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS)
4915 struct adapter *adapter;
/* Let the sysctl framework copy the user-supplied integer in/out */
4918 error = sysctl_handle_int(oidp, &result, 0, req);
/* On error, or on a read (no new value supplied), do nothing
 * (the early-return body is in lines elided from this excerpt) */
4920 if (error || !req->newptr)
/* arg1 was registered as the adapter softc pointer */
4924 adapter = (struct adapter *) arg1;
4925 ixgbe_print_debug_info(adapter);
4931 ** Set flow control using sysctl:
4932 ** Flow control values:
**     0 - off
**     1 - rx pause
**     2 - tx pause
**     3 - full
4939 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4942 struct adapter *adapter;
/* Copy the requested flow-control mode in from userland; note the
 * value lands in the global ixgbe_flow_control, shared module-wide */
4944 error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);
4949 adapter = (struct adapter *) arg1;
/* Translate the sysctl value into the hw's requested fc mode */
4950 switch (ixgbe_flow_control) {
4951 case ixgbe_fc_rx_pause:
4952 case ixgbe_fc_tx_pause:
/* fallthrough target: presumably a case ixgbe_fc_full label sits in
 * the elided lines so all three valid modes pass straight through */
4954 adapter->hw.fc.requested_mode = ixgbe_flow_control;
/* presumably the default: label (elided here) — any other value
 * disables flow control entirely */
4958 adapter->hw.fc.requested_mode = ixgbe_fc_none;
/* Apply the newly requested mode to the MAC */
4961 ixgbe_fc_enable(&adapter->hw, 0);
/*
 * Register a read/write integer sysctl node under this device's
 * sysctl tree, exposing *limit as a runtime-tunable value.
 *
 * name        - sysctl leaf name
 * description - human-readable description shown by sysctl -d
 * limit       - storage for the tunable, read/written by the node
 * value       - initial/default value passed to SYSCTL_ADD_INT
 */
4966 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
4967 const char *description, int *limit, int value)
4970 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4971 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4972 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);