1 /******************************************************************************
3 Copyright (c) 2001-2011, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_inet6.h"
42 /*********************************************************************
43 * Set this to one to display debug statistics
44 *********************************************************************/
45 int ixgbe_display_debug_stats = 0;
47 /*********************************************************************
49 *********************************************************************/
/* Driver version string; embedded into the device description by ixgbe_probe(). */
50 char ixgbe_driver_version[] = "2.3.11";
52 /*********************************************************************
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixgbe_strings
57 * Last entry must be all 0s
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 *********************************************************************/
/* A subvendor/subdevice value of 0 acts as a wildcard in ixgbe_probe()'s match loop. */
62 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
84 /* required last entry */
88 /*********************************************************************
89 * Table of branding strings
90 *********************************************************************/
/* Indexed by the last (String Index) field of ixgbe_vendor_info_array entries. */
92 static char *ixgbe_strings[] = {
93 "Intel(R) PRO/10GbE PCI-Express Network Driver"
96 /*********************************************************************
98 *********************************************************************/
/* Local (file-scope) function prototypes for the driver. */
99 static int ixgbe_probe(device_t);
100 static int ixgbe_attach(device_t);
101 static int ixgbe_detach(device_t);
102 static int ixgbe_shutdown(device_t);
103 static void ixgbe_start(struct ifnet *);
104 static void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
105 #if __FreeBSD_version >= 800000
106 static int ixgbe_mq_start(struct ifnet *, struct mbuf *);
107 static int ixgbe_mq_start_locked(struct ifnet *,
108 struct tx_ring *, struct mbuf *);
109 static void ixgbe_qflush(struct ifnet *);
111 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
112 static void ixgbe_init(void *);
113 static void ixgbe_init_locked(struct adapter *);
114 static void ixgbe_stop(void *);
115 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
116 static int ixgbe_media_change(struct ifnet *);
117 static void ixgbe_identify_hardware(struct adapter *);
118 static int ixgbe_allocate_pci_resources(struct adapter *);
119 static int ixgbe_allocate_msix(struct adapter *);
120 static int ixgbe_allocate_legacy(struct adapter *);
121 static int ixgbe_allocate_queues(struct adapter *);
122 static int ixgbe_setup_msix(struct adapter *);
123 static void ixgbe_free_pci_resources(struct adapter *);
124 static void ixgbe_local_timer(void *);
125 static int ixgbe_setup_interface(device_t, struct adapter *);
126 static void ixgbe_config_link(struct adapter *);
/* Transmit-path setup/teardown */
128 static int ixgbe_allocate_transmit_buffers(struct tx_ring *);
129 static int ixgbe_setup_transmit_structures(struct adapter *);
130 static void ixgbe_setup_transmit_ring(struct tx_ring *);
131 static void ixgbe_initialize_transmit_units(struct adapter *);
132 static void ixgbe_free_transmit_structures(struct adapter *);
133 static void ixgbe_free_transmit_buffers(struct tx_ring *);
/* Receive-path setup/teardown */
135 static int ixgbe_allocate_receive_buffers(struct rx_ring *);
136 static int ixgbe_setup_receive_structures(struct adapter *);
137 static int ixgbe_setup_receive_ring(struct rx_ring *);
138 static void ixgbe_initialize_receive_units(struct adapter *);
139 static void ixgbe_free_receive_structures(struct adapter *);
140 static void ixgbe_free_receive_buffers(struct rx_ring *);
141 static void ixgbe_setup_hw_rsc(struct rx_ring *);
143 static void ixgbe_enable_intr(struct adapter *);
144 static void ixgbe_disable_intr(struct adapter *);
145 static void ixgbe_update_stats_counters(struct adapter *);
146 static bool ixgbe_txeof(struct tx_ring *);
147 static bool ixgbe_rxeof(struct ix_queue *, int);
148 static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
149 static void ixgbe_set_promisc(struct adapter *);
150 static void ixgbe_set_multi(struct adapter *);
151 static void ixgbe_update_link_status(struct adapter *);
152 static void ixgbe_refresh_mbufs(struct rx_ring *, int);
153 static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
154 static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
155 static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
156 static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
157 struct ixgbe_dma_alloc *, int);
158 static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
159 static void ixgbe_add_rx_process_limit(struct adapter *, const char *,
160 const char *, int *, int);
161 static bool ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
162 static bool ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
163 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
164 static void ixgbe_configure_ivars(struct adapter *);
165 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
167 static void ixgbe_setup_vlan_hw_support(struct adapter *);
168 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
169 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
171 static void ixgbe_add_hw_stats(struct adapter *adapter);
173 static __inline void ixgbe_rx_discard(struct rx_ring *, int);
174 static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
177 /* Support for pluggable optic modules */
178 static bool ixgbe_sfp_probe(struct adapter *);
179 static void ixgbe_setup_optics(struct adapter *);
181 /* Legacy (single vector interrupt handler */
182 static void ixgbe_legacy_irq(void *);
184 /* The MSI/X Interrupt handlers */
185 static void ixgbe_msix_que(void *);
186 static void ixgbe_msix_link(void *);
188 /* Deferred interrupt tasklets */
189 static void ixgbe_handle_que(void *, int);
190 static void ixgbe_handle_link(void *, int);
191 static void ixgbe_handle_msf(void *, int);
192 static void ixgbe_handle_mod(void *, int);
/* Flow Director (82599) support */
195 static void ixgbe_atr(struct tx_ring *, struct mbuf *);
196 static void ixgbe_reinit_fdir(void *, int);
199 /*********************************************************************
200 * FreeBSD Device Interface Entry Points
201 *********************************************************************/
203 static device_method_t ixgbe_methods[] = {
204 /* Device interface */
205 DEVMETHOD(device_probe, ixgbe_probe),
206 DEVMETHOD(device_attach, ixgbe_attach),
207 DEVMETHOD(device_detach, ixgbe_detach),
208 DEVMETHOD(device_shutdown, ixgbe_shutdown),
212 static driver_t ixgbe_driver = {
213 "ix", ixgbe_methods, sizeof(struct adapter),
216 devclass_t ixgbe_devclass;
/* Register the driver on the pci bus; interfaces appear as "ix<N>". */
217 DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
/* Module dependencies: the pci bus code and the ethernet layer. */
219 MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
220 MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
223 ** TUNEABLE PARAMETERS:
227 ** AIM: Adaptive Interrupt Moderation
228 ** which means that the interrupt rate
229 ** is varied over time based on the
230 ** traffic for that interrupt vector
232 static int ixgbe_enable_aim = TRUE;
233 TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
235 static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY);
236 TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
238 /* How many packets rxeof tries to clean at a time */
239 static int ixgbe_rx_process_limit = 128;
240 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
242 /* Flow control setting, default to full */
243 static int ixgbe_flow_control = ixgbe_fc_full;
244 TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
247 ** Smart speed setting, default to on
248 ** this only works as a compile option
249 ** right now as its during attach, set
250 ** this to 'ixgbe_smart_speed_off' to
253 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
256 * MSIX should be the default for best performance,
257 * but this allows it to be forced off for testing.
259 static int ixgbe_enable_msix = 1;
260 TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
263 * Header split: this causes the hardware to DMA
264 * the header into a separate mbuf from the payload,
265 * it can be a performance win in some workloads, but
266 * in others it actually hurts, its off by default.
/*
 * BUGFIX: was "static bool" registered with TUNABLE_INT(); the tunable
 * fetch writes a full int into the variable, overflowing a 1-byte bool.
 * Declare it as int (FALSE/TRUE still apply) so the TUNABLE_INT store
 * is in-bounds; truth-value uses elsewhere are unaffected.
 */
268 static int ixgbe_header_split = FALSE;
269 TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);
272 * Number of Queues, can be set to 0,
273 * it then autoconfigures based on the
274 * number of cpus with a max of 8. This
275 * can be overriden manually here.
277 static int ixgbe_num_queues = 0;
278 TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
281 ** Number of TX descriptors per ring,
282 ** setting higher than RX as this seems
283 ** the better performing choice.
285 static int ixgbe_txd = PERFORM_TXD;
286 TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
288 /* Number of RX descriptors per ring */
289 static int ixgbe_rxd = PERFORM_RXD;
290 TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
292 /* Keep running tab on them for sanity check */
293 static int ixgbe_total_ports;
297 ** For Flow Director: this is the
298 ** number of TX packets we sample
299 ** for the filter pool, this means
300 ** every 20th packet will be probed.
302 ** This feature can be disabled by
303 ** setting this to 0.
305 static int atr_sample_rate = 20;
307 ** Flow Director actually 'steals'
308 ** part of the packet buffer as its
309 ** filter pool, this variable controls
311 ** 0 = 64K, 1 = 128K, 2 = 256K
313 static int fdir_pballoc = 1;
316 /*********************************************************************
317 * Device identification routine
319 * ixgbe_probe determines if the driver should be loaded on
320 * adapter based on PCI vendor/device id of the adapter.
322 * return BUS_PROBE_DEFAULT on success, positive on failure
323 *********************************************************************/
326 ixgbe_probe(device_t dev)
328 ixgbe_vendor_info_t *ent;
330 u16 pci_vendor_id = 0;
331 u16 pci_device_id = 0;
332 u16 pci_subvendor_id = 0;
333 u16 pci_subdevice_id = 0;
334 char adapter_name[256];
336 INIT_DEBUGOUT("ixgbe_probe: begin");
/* Fast reject: only Intel devices can match the table below. */
338 pci_vendor_id = pci_get_vendor(dev);
339 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
342 pci_device_id = pci_get_device(dev);
343 pci_subvendor_id = pci_get_subvendor(dev);
344 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the vendor table; a 0 in a sub-id field matches anything. */
346 ent = ixgbe_vendor_info_array;
347 while (ent->vendor_id != 0) {
348 if ((pci_vendor_id == ent->vendor_id) &&
349 (pci_device_id == ent->device_id) &&
351 ((pci_subvendor_id == ent->subvendor_id) ||
352 (ent->subvendor_id == 0)) &&
354 ((pci_subdevice_id == ent->subdevice_id) ||
355 (ent->subdevice_id == 0))) {
/* BUGFIX: bound the format into adapter_name (was unbounded sprintf). */
356 snprintf(adapter_name, sizeof(adapter_name),
"%s, Version - %s",
357 ixgbe_strings[ent->index],
358 ixgbe_driver_version);
359 device_set_desc_copy(dev, adapter_name);
361 return (BUS_PROBE_DEFAULT);
368 /*********************************************************************
369 * Device initialization routine
371 * The attach entry point is called when the driver is being loaded.
372 * This routine identifies the type of hardware, allocates all resources
373 * and initializes the hardware.
375 * return 0 on success, positive on failure
376 *********************************************************************/
379 ixgbe_attach(device_t dev)
381 struct adapter *adapter;
387 INIT_DEBUGOUT("ixgbe_attach: begin");
/* Honor "hint.ixgbe.N.disabled" style device hints. */
389 if (resource_disabled("ixgbe", device_get_unit(dev))) {
390 device_printf(dev, "Disabled by device hint\n");
394 /* Allocate, clear, and link in our adapter structure */
395 adapter = device_get_softc(dev);
396 adapter->dev = adapter->osdep.dev = dev;
400 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Runtime sysctls: flow control, 1G advertise, and the AIM toggle. */
404 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
405 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
406 OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
407 adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
409 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
410 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
411 OID_AUTO, "advertise_gig", CTLTYPE_INT | CTLFLAG_RW,
412 adapter, 0, ixgbe_set_advertise, "I", "1G Link");
414 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
415 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
416 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
417 &ixgbe_enable_aim, 1, "Interrupt Moderation");
419 /* Set up the timer callout */
420 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
422 /* Determine hardware revision */
423 ixgbe_identify_hardware(adapter);
425 /* Do base PCI setup - map BAR0 */
426 if (ixgbe_allocate_pci_resources(adapter)) {
427 device_printf(dev, "Allocation of PCI resources failed\n");
432 /* Do descriptor calc and sanity checks */
433 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
434 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
435 device_printf(dev, "TXD config issue, using default!\n");
436 adapter->num_tx_desc = DEFAULT_TXD;
438 adapter->num_tx_desc = ixgbe_txd;
441 ** With many RX rings it is easy to exceed the
442 ** system mbuf allocation. Tuning nmbclusters
443 ** can alleviate this.
445 if (nmbclusters > 0 ) {
447 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
448 if (s > nmbclusters) {
449 device_printf(dev, "RX Descriptors exceed "
450 "system mbuf max, using default instead!\n");
451 ixgbe_rxd = DEFAULT_RXD;
/* BUGFIX: RX descriptor range check used the TX limits (MIN_TXD/MAX_TXD). */
455 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
456 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
457 device_printf(dev, "RXD config issue, using default!\n");
458 adapter->num_rx_desc = DEFAULT_RXD;
460 adapter->num_rx_desc = ixgbe_rxd;
462 /* Allocate our TX/RX Queues */
463 if (ixgbe_allocate_queues(adapter)) {
468 /* Allocate multicast array memory. */
469 adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
470 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
471 if (adapter->mta == NULL) {
472 device_printf(dev, "Can not allocate multicast setup array\n");
477 /* Initialize the shared code */
478 error = ixgbe_init_shared_code(hw);
479 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
481 ** No optics in this port, set up
482 ** so the timer routine will probe
483 ** for later insertion.
485 adapter->sfp_probe = TRUE;
487 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
488 device_printf(dev,"Unsupported SFP+ module detected!\n");
492 device_printf(dev,"Unable to initialize the shared code\n");
497 /* Make sure we have a good EEPROM before we read from it */
498 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
499 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
504 /* Get Hardware Flow Control setting */
505 hw->fc.requested_mode = ixgbe_fc_full;
506 hw->fc.pause_time = IXGBE_FC_PAUSE;
507 hw->fc.low_water = IXGBE_FC_LO;
508 hw->fc.high_water = IXGBE_FC_HI;
509 hw->fc.send_xon = TRUE;
511 error = ixgbe_init_hw(hw);
512 if (error == IXGBE_ERR_EEPROM_VERSION) {
513 device_printf(dev, "This device is a pre-production adapter/"
514 "LOM. Please be aware there may be issues associated "
515 "with your hardware.\n If you are experiencing problems "
516 "please contact your Intel or hardware representative "
517 "who provided you with this hardware.\n");
518 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
519 device_printf(dev,"Unsupported SFP+ Module\n");
523 device_printf(dev,"Hardware Initialization Failure\n");
527 /* Detect and set physical type */
528 ixgbe_setup_optics(adapter);
/* Interrupt setup: MSI-X when available and enabled, else legacy/MSI. */
530 if ((adapter->msix > 1) && (ixgbe_enable_msix))
531 error = ixgbe_allocate_msix(adapter);
533 error = ixgbe_allocate_legacy(adapter);
537 /* Setup OS specific network interface */
538 if (ixgbe_setup_interface(dev, adapter) != 0)
541 /* Sysctl for limiting the amount of work done in the taskqueue */
542 ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
543 "max number of rx packets to process", &adapter->rx_process_limit,
544 ixgbe_rx_process_limit);
546 /* Initialize statistics */
547 ixgbe_update_stats_counters(adapter);
549 /* Register for VLAN events */
550 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
551 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
552 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
553 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
555 /* Print PCIE bus type/speed/width info */
556 ixgbe_get_bus_info(hw);
557 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
558 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
559 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
560 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
561 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
562 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
565 if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
566 (hw->bus.speed == ixgbe_bus_speed_2500)) {
567 device_printf(dev, "PCI-Express bandwidth available"
568 " for this card\n is not sufficient for"
569 " optimal performance.\n");
570 device_printf(dev, "For optimal performance a x8 "
571 "PCIE, or x4 PCIE 2 slot is required.\n");
574 /* let hardware know driver is loaded */
575 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
576 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
577 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
579 ixgbe_add_hw_stats(adapter);
581 INIT_DEBUGOUT("ixgbe_attach: end");
/* Error-unwind path: release whatever was acquired before the failure. */
584 ixgbe_free_transmit_structures(adapter);
585 ixgbe_free_receive_structures(adapter);
587 if (adapter->ifp != NULL)
588 if_free(adapter->ifp);
589 ixgbe_free_pci_resources(adapter);
590 free(adapter->mta, M_DEVBUF);
595 /*********************************************************************
596 * Device removal routine
598 * The detach entry point is called when the driver is being removed.
599 * This routine stops the adapter and deallocates all the resources
600 * that were allocated for driver operation.
602 * return 0 on success, positive on failure
603 *********************************************************************/
606 ixgbe_detach(device_t dev)
608 struct adapter *adapter = device_get_softc(dev);
609 struct ix_queue *que = adapter->queues;
612 INIT_DEBUGOUT("ixgbe_detach: begin");
614 /* Make sure VLANS are not using driver */
615 if (adapter->ifp->if_vlantrunk != NULL) {
616 device_printf(dev,"Vlan in use, detach first\n");
620 IXGBE_CORE_LOCK(adapter);
622 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free each per-queue taskqueue before teardown. */
624 for (int i = 0; i < adapter->num_queues; i++, que++) {
626 taskqueue_drain(que->tq, &que->que_task);
627 taskqueue_free(que->tq);
631 /* Drain the Link queue */
633 taskqueue_drain(adapter->tq, &adapter->link_task);
634 taskqueue_drain(adapter->tq, &adapter->mod_task);
635 taskqueue_drain(adapter->tq, &adapter->msf_task);
637 taskqueue_drain(adapter->tq, &adapter->fdir_task);
639 taskqueue_free(adapter->tq);
642 /* let hardware know driver is unloading */
643 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
644 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
645 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
647 /* Unregister VLAN events */
648 if (adapter->vlan_attach != NULL)
649 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
650 if (adapter->vlan_detach != NULL)
651 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Detach from the network stack, then release all driver resources. */
653 ether_ifdetach(adapter->ifp);
654 callout_drain(&adapter->timer);
655 ixgbe_free_pci_resources(adapter);
656 bus_generic_detach(dev);
657 if_free(adapter->ifp);
659 ixgbe_free_transmit_structures(adapter);
660 ixgbe_free_receive_structures(adapter);
661 free(adapter->mta, M_DEVBUF);
663 IXGBE_CORE_LOCK_DESTROY(adapter);
667 /*********************************************************************
669 * Shutdown entry point
671 **********************************************************************/
/* Quiesce the adapter under the core lock on system shutdown. */
674 ixgbe_shutdown(device_t dev)
676 struct adapter *adapter = device_get_softc(dev);
677 IXGBE_CORE_LOCK(adapter);
679 IXGBE_CORE_UNLOCK(adapter);
684 /*********************************************************************
685 * Transmit entry point
687 * ixgbe_start is called by the stack to initiate a transmit.
688 * The driver will remain in this routine as long as there are
689 * packets to transmit and transmit resources are available.
690 * In case resources are not available stack is notified and
691 * the packet is requeued.
692 **********************************************************************/
/* Caller must hold the TX ring lock (asserted below). */
695 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
698 struct adapter *adapter = txr->adapter;
700 IXGBE_TX_LOCK_ASSERT(txr);
702 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
705 if (!adapter->link_active)
/* Drain the interface send queue into the ring. */
708 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
710 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
/* On ring-full, mark OACTIVE and requeue the packet for later. */
714 if (ixgbe_xmit(txr, &m_head)) {
717 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
718 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
721 /* Send a copy of the frame to the BPF listener */
722 ETHER_BPF_MTAP(ifp, m_head);
724 /* Set watchdog on */
725 txr->watchdog_time = ticks;
726 txr->queue_status = IXGBE_QUEUE_WORKING;
733 * Legacy TX start - called by the stack, this
734 * always uses the first tx ring, and should
735 * not be used with multiqueue tx enabled.
738 ixgbe_start(struct ifnet *ifp)
740 struct adapter *adapter = ifp->if_softc;
741 struct tx_ring *txr = adapter->tx_rings;
/* Take the TX lock and hand off to the locked worker. */
743 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
745 ixgbe_start_locked(txr, ifp);
746 IXGBE_TX_UNLOCK(txr);
751 #if __FreeBSD_version >= 800000
753 ** Multiqueue Transmit driver
757 ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
759 struct adapter *adapter = ifp->if_softc;
760 struct ix_queue *que;
764 /* Which queue to use */
/* Use the mbuf flow id to pick a TX queue when the stack provided one. */
765 if ((m->m_flags & M_FLOWID) != 0)
766 i = m->m_pkthdr.flowid % adapter->num_queues;
768 txr = &adapter->tx_rings[i];
769 que = &adapter->queues[i];
/* Try the ring lock; if contended, buffer the mbuf and defer to the taskqueue. */
771 if (IXGBE_TX_TRYLOCK(txr)) {
772 err = ixgbe_mq_start_locked(ifp, txr, m);
773 IXGBE_TX_UNLOCK(txr);
775 err = drbr_enqueue(ifp, txr->br, m);
776 taskqueue_enqueue(que->tq, &que->que_task);
/* Locked multiqueue transmit worker: caller holds the TX ring lock. */
783 ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
785 struct adapter *adapter = txr->adapter;
787 int enqueued, err = 0;
/* If not running, or no link, just buffer the frame for later. */
789 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
790 IFF_DRV_RUNNING || adapter->link_active == 0) {
792 err = drbr_enqueue(ifp, txr->br, m);
798 next = drbr_dequeue(ifp, txr->br);
799 } else if (drbr_needs_enqueue(ifp, txr->br)) {
800 if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
802 next = drbr_dequeue(ifp, txr->br);
806 /* Process the queue */
807 while (next != NULL) {
/* On xmit failure, push the frame back onto the buf ring and stop. */
808 if ((err = ixgbe_xmit(txr, &next)) != 0) {
810 err = drbr_enqueue(ifp, txr->br, next);
814 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
815 /* Send a copy of the frame to the BPF listener */
816 ETHER_BPF_MTAP(ifp, next);
817 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
819 if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
821 if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
822 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
825 next = drbr_dequeue(ifp, txr->br);
829 /* Set watchdog on */
830 txr->queue_status = IXGBE_QUEUE_WORKING;
831 txr->watchdog_time = ticks;
838 ** Flush all ring buffers
841 ixgbe_qflush(struct ifnet *ifp)
843 struct adapter *adapter = ifp->if_softc;
844 struct tx_ring *txr = adapter->tx_rings;
/* Drain every per-ring buf_ring under its TX lock. */
847 for (int i = 0; i < adapter->num_queues; i++, txr++) {
849 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
851 IXGBE_TX_UNLOCK(txr);
855 #endif /* __FreeBSD_version >= 800000 */
857 /*********************************************************************
860 * ixgbe_ioctl is called when the user wants to configure the
863 * return 0 on success, positive on failure
864 **********************************************************************/
867 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
869 struct adapter *adapter = ifp->if_softc;
870 struct ifreq *ifr = (struct ifreq *) data;
871 #if defined(INET) || defined(INET6)
872 struct ifaddr *ifa = (struct ifaddr *)data;
873 bool avoid_reset = FALSE;
/* Address-family checks for the SIOCSIFADDR-style path. */
881 if (ifa->ifa_addr->sa_family == AF_INET)
885 if (ifa->ifa_addr->sa_family == AF_INET6)
888 #if defined(INET) || defined(INET6)
890 ** Calling init results in link renegotiation,
891 ** so we avoid doing it when possible.
894 ifp->if_flags |= IFF_UP;
895 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
897 if (!(ifp->if_flags & IFF_NOARP))
898 arp_ifinit(ifp, ifa);
900 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate then apply the new MTU and re-init under the lock. */
904 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
905 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
908 IXGBE_CORE_LOCK(adapter);
909 ifp->if_mtu = ifr->ifr_mtu;
910 adapter->max_frame_size =
911 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
912 ixgbe_init_locked(adapter);
913 IXGBE_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: only touch promisc/allmulti when already running. */
917 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
918 IXGBE_CORE_LOCK(adapter);
919 if (ifp->if_flags & IFF_UP) {
920 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
921 if ((ifp->if_flags ^ adapter->if_flags) &
922 (IFF_PROMISC | IFF_ALLMULTI)) {
923 ixgbe_set_promisc(adapter);
926 ixgbe_init_locked(adapter);
928 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
930 adapter->if_flags = ifp->if_flags;
931 IXGBE_CORE_UNLOCK(adapter);
/* SIOC(ADD|DEL)MULTI: reload the multicast filter with interrupts masked. */
935 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
936 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
937 IXGBE_CORE_LOCK(adapter);
938 ixgbe_disable_intr(adapter);
939 ixgbe_set_multi(adapter);
940 ixgbe_enable_intr(adapter);
941 IXGBE_CORE_UNLOCK(adapter);
946 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
947 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle only the capability bits that changed. */
951 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
952 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
953 if (mask & IFCAP_HWCSUM)
954 ifp->if_capenable ^= IFCAP_HWCSUM;
955 if (mask & IFCAP_TSO4)
956 ifp->if_capenable ^= IFCAP_TSO4;
957 if (mask & IFCAP_LRO)
958 ifp->if_capenable ^= IFCAP_LRO;
959 if (mask & IFCAP_VLAN_HWTAGGING)
960 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
961 if (mask & IFCAP_VLAN_HWFILTER)
962 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
963 if (mask & IFCAP_VLAN_HWTSO)
964 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
965 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
966 IXGBE_CORE_LOCK(adapter);
967 ixgbe_init_locked(adapter);
968 IXGBE_CORE_UNLOCK(adapter);
970 VLAN_CAPABILITIES(ifp);
/* Anything else falls through to the generic ethernet ioctl handler. */
975 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
976 error = ether_ioctl(ifp, command, data);
983 /*********************************************************************
986 * This routine is used in two ways. It is used by the stack as
987 * init entry point in network interface structure. It is also used
988 * by the driver as a hw/sw initialization routine to get to a
991 * return 0 on success, positive on failure
992 **********************************************************************/
993 #define IXGBE_MHADD_MFS_SHIFT 16
996 ixgbe_init_locked(struct adapter *adapter)
998 struct ifnet *ifp = adapter->ifp;
999 device_t dev = adapter->dev;
1000 struct ixgbe_hw *hw = &adapter->hw;
1001 u32 k, txdctl, mhadd, gpie;
1004 mtx_assert(&adapter->core_mtx, MA_OWNED);
1005 INIT_DEBUGOUT("ixgbe_init: begin");
1006 hw->adapter_stopped = FALSE;
1007 ixgbe_stop_adapter(hw);
1008 callout_stop(&adapter->timer);
1010 /* reprogram the RAR[0] in case user changed it. */
1011 ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1013 /* Get the latest mac address, User can use a LAA */
1014 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
1015 IXGBE_ETH_LENGTH_OF_ADDRESS);
1016 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
1017 hw->addr_ctrl.rar_used_count = 1;
1019 /* Set the various hardware offload abilities */
1020 ifp->if_hwassist = 0;
1021 if (ifp->if_capenable & IFCAP_TSO4)
1022 ifp->if_hwassist |= CSUM_TSO;
1023 if (ifp->if_capenable & IFCAP_TXCSUM) {
1024 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1025 #if __FreeBSD_version >= 800000
1026 if (hw->mac.type != ixgbe_mac_82598EB)
1027 ifp->if_hwassist |= CSUM_SCTP;
1031 /* Prepare transmit descriptors and buffers */
1032 if (ixgbe_setup_transmit_structures(adapter)) {
1033 device_printf(dev,"Could not setup transmit structures\n");
1034 ixgbe_stop(adapter);
1039 ixgbe_initialize_transmit_units(adapter);
1041 /* Setup Multicast table */
1042 ixgbe_set_multi(adapter);
1045 ** Determine the correct mbuf pool
1046 ** for doing jumbo/headersplit
1048 if (adapter->max_frame_size <= 2048)
1049 adapter->rx_mbuf_sz = MCLBYTES;
1050 else if (adapter->max_frame_size <= 4096)
1051 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1052 else if (adapter->max_frame_size <= 9216)
1053 adapter->rx_mbuf_sz = MJUM9BYTES;
1055 adapter->rx_mbuf_sz = MJUM16BYTES;
1057 /* Prepare receive descriptors and buffers */
1058 if (ixgbe_setup_receive_structures(adapter)) {
1059 device_printf(dev,"Could not setup receive structures\n");
1060 ixgbe_stop(adapter);
1064 /* Configure RX settings */
1065 ixgbe_initialize_receive_units(adapter);
1067 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
1069 /* Enable Fan Failure Interrupt */
1070 gpie |= IXGBE_SDP1_GPIEN;
1072 /* Add for Thermal detection */
1073 if (hw->mac.type == ixgbe_mac_82599EB)
1074 gpie |= IXGBE_SDP2_GPIEN;
1076 if (adapter->msix > 1) {
1077 /* Enable Enhanced MSIX mode */
1078 gpie |= IXGBE_GPIE_MSIX_MODE;
1079 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1082 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1085 if (ifp->if_mtu > ETHERMTU) {
1086 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1087 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1088 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1089 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1092 /* Now enable all the queues */
1094 for (int i = 0; i < adapter->num_queues; i++) {
1095 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1096 txdctl |= IXGBE_TXDCTL_ENABLE;
1097 /* Set WTHRESH to 8, burst writeback */
1098 txdctl |= (8 << 16);
1099 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1102 for (int i = 0; i < adapter->num_queues; i++) {
1103 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1104 if (hw->mac.type == ixgbe_mac_82598EB) {
1110 rxdctl &= ~0x3FFFFF;
1113 rxdctl |= IXGBE_RXDCTL_ENABLE;
1114 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1115 for (k = 0; k < 10; k++) {
1116 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1117 IXGBE_RXDCTL_ENABLE)
1123 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1126 /* Set up VLAN support and filter */
1127 ixgbe_setup_vlan_hw_support(adapter);
1129 /* Enable Receive engine */
1130 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1131 if (hw->mac.type == ixgbe_mac_82598EB)
1132 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1133 rxctrl |= IXGBE_RXCTRL_RXEN;
1134 ixgbe_enable_rx_dma(hw, rxctrl);
1136 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1138 /* Set up MSI/X routing */
1139 if (ixgbe_enable_msix) {
1140 ixgbe_configure_ivars(adapter);
1141 /* Set up auto-mask */
1142 if (hw->mac.type == ixgbe_mac_82598EB)
1143 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1145 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1146 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1148 } else { /* Simple settings for Legacy/MSI */
1149 ixgbe_set_ivar(adapter, 0, 0, 0);
1150 ixgbe_set_ivar(adapter, 0, 0, 1);
1151 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1155 /* Init Flow director */
1156 if (hw->mac.type != ixgbe_mac_82598EB)
1157 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1161 ** Check on any SFP devices that
1162 ** need to be kick-started
1164 if (hw->phy.type == ixgbe_phy_none) {
1165 int err = hw->phy.ops.identify(hw);
1166 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1168 "Unsupported SFP+ module type was detected.\n");
1173 /* Set moderation on the Link interrupt */
1174 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
1176 /* Config/Enable Link */
1177 ixgbe_config_link(adapter);
1179 /* And now turn on interrupts */
1180 ixgbe_enable_intr(adapter);
1182 /* Now inform the stack we're ready */
1183 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1184 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * ixgbe_init - if_init entry point handed to the network stack.
 * Serializes on the core lock and defers all work to
 * ixgbe_init_locked().
 * NOTE(review): this excerpt elides some source lines (braces,
 * return); code below is left byte-identical.
 */
1190 ixgbe_init(void *arg)
1192 struct adapter *adapter = arg;
1194 IXGBE_CORE_LOCK(adapter);
1195 ixgbe_init_locked(adapter);
1196 IXGBE_CORE_UNLOCK(adapter);
1203 ** MSIX Interrupt Handlers and Tasklets
/*
 * ixgbe_enable_queue - unmask (enable) the interrupt for one queue
 * vector.  82598 has a single 32-bit EIMS register; later MACs split
 * the 64-bit queue mask across EIMS_EX(0) (low 32) and EIMS_EX(1)
 * (high 32).
 * NOTE(review): '(u64)(1 << vector)' shifts a 32-bit int before the
 * widening cast, so vector >= 32 would be undefined behavior —
 * presumably vector < 32 in practice; TODO confirm against the
 * maximum queue count.  Excerpt elides some lines; code unchanged.
 */
1208 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1210 struct ixgbe_hw *hw = &adapter->hw;
1211 u64 queue = (u64)(1 << vector);
1214 if (hw->mac.type == ixgbe_mac_82598EB) {
1215 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1216 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
/* Non-82598: write low and high halves of the 64-bit mask */
1218 mask = (queue & 0xFFFFFFFF);
1220 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1221 mask = (queue >> 32);
1223 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
/*
 * ixgbe_disable_queue - mask (disable) the interrupt for one queue
 * vector; mirror image of ixgbe_enable_queue() but writing the EIMC
 * (clear) registers instead of EIMS.
 * NOTE(review): same '(u64)(1 << vector)' 32-bit-shift caveat as in
 * ixgbe_enable_queue().  Excerpt elides some lines; code unchanged.
 */
1228 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1230 struct ixgbe_hw *hw = &adapter->hw;
1231 u64 queue = (u64)(1 << vector);
1234 if (hw->mac.type == ixgbe_mac_82598EB) {
1235 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1236 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
/* Non-82598: clear low and high halves of the 64-bit mask */
1238 mask = (queue & 0xFFFFFFFF);
1240 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1241 mask = (queue >> 32);
1243 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
/*
 * ixgbe_rearm_queues - software-trigger interrupts for the queues in
 * the 64-bit 'queues' bitmap by writing EICS (interrupt cause set).
 * 82598 uses the single EICS register; later MACs use EICS_EX(0)/(1)
 * for the low/high 32 bits.
 * NOTE(review): excerpt elides some lines; code unchanged.
 */
1248 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
1252 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1253 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
1254 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1256 mask = (queues & 0xFFFFFFFF);
1257 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
1258 mask = (queues >> 32);
1259 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
/*
 * ixgbe_handle_que - deferred (taskqueue) per-queue service routine.
 * If the interface is running: drains RX up to rx_process_limit,
 * kicks the TX path (drbr/multiqueue on FreeBSD >= 8, legacy if_snd
 * otherwise), and re-queues itself while more RX work remains.
 * Finally re-enables this queue's interrupt vector.
 * NOTE(review): excerpt elides some lines (#else/#endif, braces,
 * TX lock acquire); code below is left byte-identical.
 */
1265 ixgbe_handle_que(void *context, int pending)
1267 struct ix_queue *que = context;
1268 struct adapter *adapter = que->adapter;
1269 struct tx_ring *txr = que->txr;
1270 struct ifnet *ifp = adapter->ifp;
1273 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1274 more = ixgbe_rxeof(que, adapter->rx_process_limit);
1277 #if __FreeBSD_version >= 800000
/* Multiqueue: restart the buf_ring if anything is queued */
1278 if (!drbr_empty(ifp, txr->br))
1279 ixgbe_mq_start_locked(ifp, txr, NULL);
/* Legacy single-queue send path */
1281 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1282 ixgbe_start_locked(txr, ifp);
1284 IXGBE_TX_UNLOCK(txr);
/* More RX pending: reschedule ourselves rather than spin here */
1286 taskqueue_enqueue(que->tq, &que->que_task);
1291 /* Reenable this interrupt */
1292 ixgbe_enable_queue(adapter, que->msix);
1297 /*********************************************************************
1299 * Legacy Interrupt Service routine
1301 **********************************************************************/
/*
 * ixgbe_legacy_irq - INTx/MSI interrupt service routine.
 * Reads and implicitly acknowledges EICR; a zero cause means a
 * shared/spurious interrupt, so interrupts are simply re-enabled.
 * Otherwise it drains RX, loops on TX cleanup (bounded by MAX_LOOP),
 * defers leftover work to the queue task, checks fan-failure (SDP1
 * on copper parts) and link-change causes, then re-enables
 * interrupts.
 * NOTE(review): excerpt elides some lines (returns, TX lock,
 * braces); code below is left byte-identical.
 */
1304 ixgbe_legacy_irq(void *arg)
1306 struct ix_queue *que = arg;
1307 struct adapter *adapter = que->adapter;
1308 struct ixgbe_hw *hw = &adapter->hw;
1309 struct tx_ring *txr = adapter->tx_rings;
1310 bool more_tx, more_rx;
1311 u32 reg_eicr, loop = MAX_LOOP;
1314 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
/* Shared interrupt not for us: just unmask and bail */
1317 if (reg_eicr == 0) {
1318 ixgbe_enable_intr(adapter);
1322 more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
/* Bounded TX-clean loop; 'loop' caps iterations at MAX_LOOP */
1326 more_tx = ixgbe_txeof(txr);
1327 } while (loop-- && more_tx);
1328 IXGBE_TX_UNLOCK(txr);
/* Leftover work is handed to the deferred queue task */
1330 if (more_rx || more_tx)
1331 taskqueue_enqueue(que->tq, &que->que_task);
1333 /* Check for fan failure */
1334 if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1335 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1336 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1337 "REPLACE IMMEDIATELY!!\n");
1338 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
1341 /* Link status change */
1342 if (reg_eicr & IXGBE_EICR_LSC)
1343 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1345 ixgbe_enable_intr(adapter);
1350 /*********************************************************************
1352 * MSIX Queue Interrupt Service routine
1354 **********************************************************************/
/*
 * ixgbe_msix_que - MSI-X interrupt handler for one RX/TX queue pair.
 * Services RX then TX, makes sure any stack-queued transmit work gets
 * picked up, and (when ixgbe_enable_aim is on) performs Adaptive
 * Interrupt Moderation: it writes last interval's EITR setting, then
 * derives the next one from average frame size over the interval.
 * Remaining work re-queues the task; otherwise the vector is
 * re-enabled.
 * NOTE(review): excerpt elides many lines (disable_queue call, TX
 * lock, 'no_calc' label, bytes/packets resets); code unchanged.
 */
1356 ixgbe_msix_que(void *arg)
1358 struct ix_queue *que = arg;
1359 struct adapter *adapter = que->adapter;
1360 struct tx_ring *txr = que->txr;
1361 struct rx_ring *rxr = que->rxr;
1362 bool more_tx, more_rx;
1367 more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
1370 more_tx = ixgbe_txeof(txr);
1372 ** Make certain that if the stack
1373 ** has anything queued the task gets
1374 ** scheduled to handle it.
1376 #if __FreeBSD_version < 800000
1377 if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
1379 if (!drbr_empty(adapter->ifp, txr->br))
1382 IXGBE_TX_UNLOCK(txr);
/* AIM disabled: skip moderation entirely */
1386 if (ixgbe_enable_aim == FALSE)
1389 ** Do Adaptive Interrupt Moderation:
1390 ** - Write out last calculated setting
1391 ** - Calculate based on average size over
1392 ** the last interval.
1394 if (que->eitr_setting)
1395 IXGBE_WRITE_REG(&adapter->hw,
1396 IXGBE_EITR(que->msix), que->eitr_setting);
1398 que->eitr_setting = 0;
1400 /* Idle, do nothing */
1401 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* Average frame size drives the new interrupt rate */
1404 if ((txr->bytes) && (txr->packets))
1405 newitr = txr->bytes/txr->packets;
1406 if ((rxr->bytes) && (rxr->packets))
1407 newitr = max(newitr,
1408 (rxr->bytes / rxr->packets));
1409 newitr += 24; /* account for hardware frame, crc */
1411 /* set an upper boundary */
1412 newitr = min(newitr, 3000);
1414 /* Be nice to the mid range */
1415 if ((newitr > 300) && (newitr < 1200))
1416 newitr = (newitr / 3);
1418 newitr = (newitr / 2);
/* 82598 wants the value mirrored into the high half-word */
1420 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1421 newitr |= newitr << 16;
1423 newitr |= IXGBE_EITR_CNT_WDIS;
1425 /* save for next interrupt */
1426 que->eitr_setting = newitr;
1435 if (more_tx || more_rx)
1436 taskqueue_enqueue(que->tq, &que->que_task);
1437 else /* Reenable this interrupt */
1438 ixgbe_enable_queue(adapter, que->msix);
/*
 * ixgbe_msix_link - MSI-X handler for the "other"/link vector.
 * Reads the cause (via EICS), acks it in EICR, then dispatches:
 * link-state change, Flow Director reinit (non-82598, guarded by an
 * atomic flag so only one reinit runs), ECC error report, SFP module
 * insertion (SDP2) / multispeed-fiber (SDP1) tasks, and fan failure
 * on the 82598AT.  Finally re-enables the OTHER cause in EIMS.
 * NOTE(review): excerpt elides some lines (returns, braces, #ifdef
 * IXGBE_FDIR guards); code below is left byte-identical.
 */
1444 ixgbe_msix_link(void *arg)
1446 struct adapter *adapter = arg;
1447 struct ixgbe_hw *hw = &adapter->hw;
1450 ++adapter->link_irq;
1452 /* First get the cause */
1453 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1454 /* Clear interrupt with write */
1455 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1457 /* Link status change */
1458 if (reg_eicr & IXGBE_EICR_LSC)
1459 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1461 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1463 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1464 /* This is probably overkill :) */
1465 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1467 /* Clear the interrupt */
1468 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1469 /* Turn off the interface */
1470 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1471 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1474 if (reg_eicr & IXGBE_EICR_ECC) {
1475 device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1476 "Please Reboot!!\n");
1477 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
/* SDP1 = multispeed fiber event, SDP2 = SFP module event */
1480 if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
1481 /* Clear the interrupt */
1482 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1483 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1484 } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
1485 /* Clear the interrupt */
1486 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1487 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1491 /* Check for fan failure */
1492 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1493 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1494 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1495 "REPLACE IMMEDIATELY!!\n");
1496 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1499 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1503 /*********************************************************************
1505 * Media Ioctl callback
1507 * This routine is called whenever the user queries the status of
1508 * the interface using ifconfig.
1510 **********************************************************************/
/*
 * ixgbe_media_status - ifmedia status callback (ifconfig query).
 * Refreshes link state under the core lock and reports
 * valid/active flags plus the active media subtype: 1000_T for 1G,
 * or the probed 'adapter->optics' type for 10G, always full duplex.
 * NOTE(review): excerpt elides some lines (returns, case breaks,
 * switch brace); code below is left byte-identical.
 */
1512 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1514 struct adapter *adapter = ifp->if_softc;
1516 INIT_DEBUGOUT("ixgbe_media_status: begin");
1517 IXGBE_CORE_LOCK(adapter);
1518 ixgbe_update_link_status(adapter);
1520 ifmr->ifm_status = IFM_AVALID;
1521 ifmr->ifm_active = IFM_ETHER;
/* No link: report media-valid only and return early */
1523 if (!adapter->link_active) {
1524 IXGBE_CORE_UNLOCK(adapter);
1528 ifmr->ifm_status |= IFM_ACTIVE;
1530 switch (adapter->link_speed) {
1531 case IXGBE_LINK_SPEED_1GB_FULL:
1532 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1534 case IXGBE_LINK_SPEED_10GB_FULL:
1535 ifmr->ifm_active |= adapter->optics | IFM_FDX;
1539 IXGBE_CORE_UNLOCK(adapter);
1544 /*********************************************************************
1546 * Media Ioctl callback
1548 * This routine is called when the user changes speed/duplex using
1549 media/mediaopt option with ifconfig.
1551 **********************************************************************/
/*
 * ixgbe_media_change - ifmedia change callback (ifconfig media ...).
 * Only IFM_ETHER/auto is accepted: auto advertises 1G + 10G;
 * anything else is rejected with a console message.
 * NOTE(review): excerpt elides some lines (case labels, returns);
 * code below is left byte-identical.
 */
1553 ixgbe_media_change(struct ifnet * ifp)
1555 struct adapter *adapter = ifp->if_softc;
1556 struct ifmedia *ifm = &adapter->media;
1558 INIT_DEBUGOUT("ixgbe_media_change: begin");
1560 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1563 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1565 adapter->hw.phy.autoneg_advertised =
1566 IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
1569 device_printf(adapter->dev, "Only auto media type\n");
1576 /*********************************************************************
1578 * This routine maps the mbufs to tx descriptors, allowing the
1579 * TX engine to transmit the packets.
1580 * - return 0 on success, positive on failure
1582 **********************************************************************/
/*
 * ixgbe_xmit - map an mbuf chain onto advanced TX descriptors.
 * Steps: DMA-map the chain (defragmenting once on EFBIG), verify
 * descriptor availability, set up the offload context (TSO or
 * checksum), optionally sample the flow for Flow Director ATR,
 * fill one data descriptor per DMA segment, mark EOP|RS on the last,
 * record the EOP index on the *first* buffer for completion
 * processing, sync the ring, and bump the TDT tail register.
 * Returns 0 on success, positive errno on failure (per header).
 * NOTE(review): excerpt elides many lines (error-path labels and
 * returns, map variable setup, braces); code left byte-identical.
 */
1585 ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1587 struct adapter *adapter = txr->adapter;
1588 u32 olinfo_status = 0, cmd_type_len;
1590 int i, j, error, nsegs;
1591 int first, last = 0;
1592 struct mbuf *m_head;
/* VLA sized by per-MAC scatter limit (num_segs) — stack allocated */
1593 bus_dma_segment_t segs[adapter->num_segs];
1595 struct ixgbe_tx_buf *txbuf;
1596 union ixgbe_adv_tx_desc *txd = NULL;
1600 /* Basic descriptor defines */
1601 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1602 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1604 if (m_head->m_flags & M_VLANTAG)
1605 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1608 * Important to capture the first descriptor
1609 * used because it will contain the index of
1610 * the one we tell the hardware to report back
1612 first = txr->next_avail_desc;
1613 txbuf = &txr->tx_buffers[first];
1617 * Map the packet for DMA.
1619 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1620 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
/* Too many segments: defragment once, then retry the mapping */
1622 if (error == EFBIG) {
1625 m = m_defrag(*m_headp, M_DONTWAIT);
1627 adapter->mbuf_defrag_failed++;
1635 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1636 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1638 if (error == ENOMEM) {
1639 adapter->no_tx_dma_setup++;
1641 } else if (error != 0) {
1642 adapter->no_tx_dma_setup++;
1647 } else if (error == ENOMEM) {
1648 adapter->no_tx_dma_setup++;
1650 } else if (error != 0) {
1651 adapter->no_tx_dma_setup++;
1657 /* Make certain there are enough descriptors */
1658 if (nsegs > txr->tx_avail - 2) {
1659 txr->no_desc_avail++;
1666 ** Set up the appropriate offload context
1667 ** this becomes the first descriptor of
1670 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1671 if (ixgbe_tso_setup(txr, m_head, &paylen)) {
1672 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1673 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1674 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1675 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1679 } else if (ixgbe_tx_ctx_setup(txr, m_head))
1680 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1682 #ifdef IXGBE_IEEE1588
1683 /* This is changing soon to an mtag detection */
1684 if (we detect this mbuf has a TSTAMP mtag)
1685 cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
1689 /* Do the flow director magic */
1690 if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
1692 if (txr->atr_count >= atr_sample_rate) {
1693 ixgbe_atr(txr, m_head);
1698 /* Record payload length */
1700 olinfo_status |= m_head->m_pkthdr.len <<
1701 IXGBE_ADVTXD_PAYLEN_SHIFT;
/* One advanced data descriptor per DMA segment */
1703 i = txr->next_avail_desc;
1704 for (j = 0; j < nsegs; j++) {
1708 txbuf = &txr->tx_buffers[i];
1709 txd = &txr->tx_base[i];
1710 seglen = segs[j].ds_len;
1711 segaddr = htole64(segs[j].ds_addr);
1713 txd->read.buffer_addr = segaddr;
1714 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1715 cmd_type_len |seglen);
1716 txd->read.olinfo_status = htole32(olinfo_status);
1717 last = i; /* descriptor that will get completion IRQ */
/* Ring index wraps at num_tx_desc */
1719 if (++i == adapter->num_tx_desc)
1722 txbuf->m_head = NULL;
1723 txbuf->eop_index = -1;
/* Last descriptor gets EOP and report-status */
1726 txd->read.cmd_type_len |=
1727 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1728 txr->tx_avail -= nsegs;
1729 txr->next_avail_desc = i;
1731 txbuf->m_head = m_head;
1732 /* Swap the dma map between the first and last descriptor */
1733 txr->tx_buffers[first].map = txbuf->map;
1735 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1737 /* Set the index of the descriptor that will be marked done */
1738 txbuf = &txr->tx_buffers[first];
1739 txbuf->eop_index = last;
1741 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1742 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1744 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1745 * hardware that this frame is available to transmit.
1747 ++txr->total_packets;
1748 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
/* Error unwind: release the DMA map loaded above */
1753 bus_dmamap_unload(txr->txtag, txbuf->map);
/*
 * ixgbe_set_promisc - program FCTRL from the interface flags.
 * Clears UPE/MPE first, then re-enables them for IFF_PROMISC, or
 * MPE only for IFF_ALLMULTI.
 * NOTE(review): excerpt elides some lines (declarations, return,
 * braces); code below is left byte-identical.
 */
1759 ixgbe_set_promisc(struct adapter *adapter)
1762 struct ifnet *ifp = adapter->ifp;
/* Start from a known state: both unicast and multicast
   promiscuous bits cleared */
1764 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1765 reg_rctl &= (~IXGBE_FCTRL_UPE);
1766 reg_rctl &= (~IXGBE_FCTRL_MPE);
1767 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1769 if (ifp->if_flags & IFF_PROMISC) {
1770 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1771 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1772 } else if (ifp->if_flags & IFF_ALLMULTI) {
1773 reg_rctl |= IXGBE_FCTRL_MPE;
1774 reg_rctl &= ~IXGBE_FCTRL_UPE;
1775 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1781 /*********************************************************************
1784 * This routine is called whenever multicast address list is updated.
1786 **********************************************************************/
1787 #define IXGBE_RAR_ENTRIES 16
/*
 * ixgbe_set_multi - rebuild the hardware multicast filter from the
 * interface's multicast address list.  Programs FCTRL for
 * promisc/allmulti modes, collects AF_LINK addresses into a flat
 * array under the maddr lock, and hands the array to the shared-code
 * update routine via the ixgbe_mc_array_itr iterator.
 * NOTE(review): excerpt elides some lines (mta declaration, mcnt,
 * IF_ADDR_LOCK, 'continue', update_ptr assignment); code unchanged.
 */
1790 ixgbe_set_multi(struct adapter *adapter)
1795 struct ifmultiaddr *ifma;
1797 struct ifnet *ifp = adapter->ifp;
1799 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1802 bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1803 MAX_NUM_MULTICAST_ADDRESSES);
1805 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1806 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1807 if (ifp->if_flags & IFF_PROMISC)
1808 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1809 else if (ifp->if_flags & IFF_ALLMULTI) {
1810 fctrl |= IXGBE_FCTRL_MPE;
1811 fctrl &= ~IXGBE_FCTRL_UPE;
1813 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1815 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Lock name differs by OS version: IF_ADDR_LOCK pre-8.0,
   if_maddr_rlock afterwards */
1817 #if __FreeBSD_version < 800000
1820 if_maddr_rlock(ifp);
1822 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1823 if (ifma->ifma_addr->sa_family != AF_LINK)
1825 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1826 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1827 IXGBE_ETH_LENGTH_OF_ADDRESS);
1830 #if __FreeBSD_version < 800000
1831 IF_ADDR_UNLOCK(ifp);
1833 if_maddr_runlock(ifp);
1837 ixgbe_update_mc_addr_list(&adapter->hw,
1838 update_ptr, mcnt, ixgbe_mc_array_itr);
1844 * This is an iterator function now needed by the multicast
1845 * shared code. It simply feeds the shared code routine the
1846 * addresses in the array of ixgbe_set_multi() one by one.
/*
 * ixgbe_mc_array_itr - iterator callback for the shared-code
 * multicast update: returns the current address and advances
 * *update_ptr by one MAC-address length.
 * NOTE(review): excerpt elides some lines (*vmdq assignment and the
 * return of 'addr'); code below is left byte-identical.
 */
1849 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1851 u8 *addr = *update_ptr;
1855 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1856 *update_ptr = newptr;
1861 /*********************************************************************
1864 * This routine checks for link status, updates statistics,
1865 * and runs the watchdog check.
1867 **********************************************************************/
/*
 * ixgbe_local_timer - once-per-second callout (core lock held by
 * caller contract, asserted below).  Probes pluggable optics if
 * needed, refreshes link state and statistics, skips the watchdog
 * while TX is flow-control paused, checks each TX queue for a hang,
 * rearms queue interrupts, and reschedules itself.  On a detected
 * hang it logs ring state and reinitializes the adapter.
 * NOTE(review): excerpt elides some lines ('out:'/'watchdog:'
 * labels, braces, goto); code below is left byte-identical.
 */
1870 ixgbe_local_timer(void *arg)
1872 struct adapter *adapter = arg;
1873 device_t dev = adapter->dev;
1874 struct tx_ring *txr = adapter->tx_rings;
1876 mtx_assert(&adapter->core_mtx, MA_OWNED);
1878 /* Check for pluggable optics */
1879 if (adapter->sfp_probe)
1880 if (!ixgbe_sfp_probe(adapter))
1881 goto out; /* Nothing to do */
1883 ixgbe_update_link_status(adapter);
1884 ixgbe_update_stats_counters(adapter);
1887 * If the interface has been paused
1888 * then don't do the watchdog check
1890 if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
1894 ** Check status on the TX queues for a hang
1896 for (int i = 0; i < adapter->num_queues; i++, txr++)
1897 if (txr->queue_status == IXGBE_QUEUE_HUNG)
1901 ixgbe_rearm_queues(adapter, adapter->que_mask);
1902 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Watchdog path: dump ring pointers, mark interface down,
   count the event, and reinit the adapter */
1906 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1907 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
1908 IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
1909 IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
1910 device_printf(dev,"TX(%d) desc avail = %d,"
1911 "Next TX to Clean = %d\n",
1912 txr->me, txr->tx_avail, txr->next_to_clean);
1913 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1914 adapter->watchdog_events++;
1915 ixgbe_init_locked(adapter);
1919 ** Note: this routine updates the OS on the link state
1920 ** the real check of the hardware only happens with
1921 ** a link interrupt.
/*
 * ixgbe_update_link_status - propagate cached link state
 * (adapter->link_up, set elsewhere by the link interrupt path) to
 * the OS: announces up/down transitions via if_link_state_change(),
 * logs the speed (128 == 10 Gbps per the ternary below), and marks
 * all TX queues idle on link loss.
 * NOTE(review): excerpt elides some lines (bootverbose guards,
 * duplex string, txr++ in the for header, braces); code unchanged.
 */
1924 ixgbe_update_link_status(struct adapter *adapter)
1926 struct ifnet *ifp = adapter->ifp;
1927 struct tx_ring *txr = adapter->tx_rings;
1928 device_t dev = adapter->dev;
1931 if (adapter->link_up){
1932 if (adapter->link_active == FALSE) {
1934 device_printf(dev,"Link is up %d Gbps %s \n",
1935 ((adapter->link_speed == 128)? 10:1),
1937 adapter->link_active = TRUE;
1938 if_link_state_change(ifp, LINK_STATE_UP);
1940 } else { /* Link down */
1941 if (adapter->link_active == TRUE) {
1943 device_printf(dev,"Link is Down\n");
1944 if_link_state_change(ifp, LINK_STATE_DOWN);
1945 adapter->link_active = FALSE;
/* Link gone: no queue can be "hung", mark them all idle */
1946 for (int i = 0; i < adapter->num_queues;
1948 txr->queue_status = IXGBE_QUEUE_IDLE;
1956 /*********************************************************************
1958 * This routine disables all traffic on the adapter by issuing a
1959 * global reset on the MAC and deallocates TX/RX buffers.
1961 **********************************************************************/
/*
 * ixgbe_stop - quiesce the adapter (core lock held, asserted):
 * disable interrupts, clear RUNNING/OACTIVE so the stack stops
 * transmitting, reset the MAC via the shared code, turn off the TX
 * laser on multispeed fiber, stop the timer callout, and reprogram
 * RAR[0] in case the user set a locally administered address.
 * NOTE(review): 'adapter_stopped = FALSE' before ixgbe_stop_adapter
 * appears intentional — it forces the shared-code stop to run even
 * if previously stopped; TODO confirm against shared code.  Excerpt
 * elides some lines (ifp declaration, return); code unchanged.
 */
1964 ixgbe_stop(void *arg)
1967 struct adapter *adapter = arg;
1968 struct ixgbe_hw *hw = &adapter->hw;
1971 mtx_assert(&adapter->core_mtx, MA_OWNED);
1973 INIT_DEBUGOUT("ixgbe_stop: begin\n");
1974 ixgbe_disable_intr(adapter);
1976 /* Tell the stack that the interface is no longer active */
1977 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1980 hw->adapter_stopped = FALSE;
1981 ixgbe_stop_adapter(hw);
1982 /* Turn off the laser */
1983 if (hw->phy.multispeed_fiber)
1984 ixgbe_disable_tx_laser(hw);
1985 callout_stop(&adapter->timer);
1987 /* reprogram the RAR[0] in case user changed it. */
1988 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1994 /*********************************************************************
1996 * Determine hardware revision.
1998 **********************************************************************/
/*
 * ixgbe_identify_hardware - capture PCI identity (vendor, device,
 * revision, subsystem IDs) into the shared-code hw struct, resolve
 * the MAC type, and set the per-MAC scatter-gather segment limit
 * (82599-class parts also get the SmartSpeed setting).
 * NOTE(review): excerpt elides some lines (else, braces); code
 * below is left byte-identical.
 */
2000 ixgbe_identify_hardware(struct adapter *adapter)
2002 device_t dev = adapter->dev;
2003 struct ixgbe_hw *hw = &adapter->hw;
2005 /* Save off the information about this board */
2006 hw->vendor_id = pci_get_vendor(dev);
2007 hw->device_id = pci_get_device(dev);
2008 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2009 hw->subsystem_vendor_id =
2010 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2011 hw->subsystem_device_id =
2012 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2014 /* We need this here to set the num_segs below */
2015 ixgbe_set_mac_type(hw);
2017 /* Pick up the 82599 and VF settings */
2018 if (hw->mac.type != ixgbe_mac_82598EB) {
2019 hw->phy.smart_speed = ixgbe_smart_speed;
2020 adapter->num_segs = IXGBE_82599_SCATTER;
/* 82598 path */
2022 adapter->num_segs = IXGBE_82598_SCATTER;
2027 /*********************************************************************
2029 * Determine optic type
2031 **********************************************************************/
/*
 * ixgbe_setup_optics - map the shared-code physical-layer type to
 * the ifmedia subtype stored in adapter->optics (used later when
 * reporting media status).  Unknown/backplane layers fall through
 * to IFM_ETHER | IFM_AUTO.
 * NOTE(review): excerpt elides some lines (layer declaration,
 * switch header, 'break's, 'default:'); code unchanged.
 */
2033 ixgbe_setup_optics(struct adapter *adapter)
2035 struct ixgbe_hw *hw = &adapter->hw;
2038 layer = ixgbe_get_supported_physical_layer(hw);
2040 case IXGBE_PHYSICAL_LAYER_10GBASE_T:
2041 adapter->optics = IFM_10G_T;
2043 case IXGBE_PHYSICAL_LAYER_1000BASE_T:
2044 adapter->optics = IFM_1000_T;
2046 case IXGBE_PHYSICAL_LAYER_10GBASE_LR:
2047 case IXGBE_PHYSICAL_LAYER_10GBASE_LRM:
2048 adapter->optics = IFM_10G_LR;
2050 case IXGBE_PHYSICAL_LAYER_10GBASE_SR:
2051 adapter->optics = IFM_10G_SR;
2053 case IXGBE_PHYSICAL_LAYER_10GBASE_KX4:
2054 case IXGBE_PHYSICAL_LAYER_10GBASE_CX4:
2055 adapter->optics = IFM_10G_CX4;
2057 case IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU:
2058 adapter->optics = IFM_10G_TWINAX;
2060 case IXGBE_PHYSICAL_LAYER_1000BASE_KX:
2061 case IXGBE_PHYSICAL_LAYER_10GBASE_KR:
2062 case IXGBE_PHYSICAL_LAYER_10GBASE_XAUI:
2063 case IXGBE_PHYSICAL_LAYER_UNKNOWN:
2065 adapter->optics = IFM_ETHER | IFM_AUTO;
2071 /*********************************************************************
2073 * Setup the Legacy or MSI Interrupt handler
2075 **********************************************************************/
/*
 * ixgbe_allocate_legacy - set up the single Legacy/MSI interrupt:
 * allocates one IRQ resource (rid 1 when MSI was granted), creates
 * the per-queue fast taskqueue plus the link/SFP/MSF (and FDIR)
 * tasks on a second taskqueue, and installs ixgbe_legacy_irq as the
 * handler.  On handler-registration failure both taskqueues are
 * freed and the error returned.
 * NOTE(review): excerpt elides some lines (rid init, error returns,
 * #ifdef IXGBE_FDIR guard, final return); code unchanged.
 */
2077 ixgbe_allocate_legacy(struct adapter *adapter)
2079 device_t dev = adapter->dev;
2080 struct ix_queue *que = adapter->queues;
/* MSI granted: vector lives at rid 1 rather than legacy rid 0 */
2084 if (adapter->msix == 1)
2087 /* We allocate a single interrupt resource */
2088 adapter->res = bus_alloc_resource_any(dev,
2089 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2090 if (adapter->res == NULL) {
2091 device_printf(dev, "Unable to allocate bus resource: "
2097 * Try allocating a fast interrupt and the associated deferred
2098 * processing contexts.
2100 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2101 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2102 taskqueue_thread_enqueue, &que->tq);
2103 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2104 device_get_nameunit(adapter->dev));
2106 /* Tasklets for Link, SFP and Multispeed Fiber */
2107 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2108 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2109 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2111 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2113 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2114 taskqueue_thread_enqueue, &adapter->tq);
2115 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2116 device_get_nameunit(adapter->dev));
2118 if ((error = bus_setup_intr(dev, adapter->res,
2119 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2120 que, &adapter->tag)) != 0) {
2121 device_printf(dev, "Failed to register fast interrupt "
2122 "handler: %d\n", error);
2123 taskqueue_free(que->tq);
2124 taskqueue_free(adapter->tq);
2129 /* For simplicity in the handlers */
2130 adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
2136 /*********************************************************************
2138 * Setup MSIX Interrupt resources and handlers
2140 **********************************************************************/
/*
 * ixgbe_allocate_msix - set up MSI-X vectors: one per queue (IRQ
 * resource, handler ixgbe_msix_que, CPU binding when multiqueue,
 * per-queue taskqueue), plus one final vector for the link/other
 * handler ixgbe_msix_link with its link/mod/msf (and FDIR) tasks.
 * Builds adapter->que_mask as it goes and records the link vector
 * in adapter->linkvec.
 * NOTE(review): '(u64)(1 << que->msix)' at 2170 shifts a 32-bit int
 * before widening — undefined for msix >= 32; presumably bounded by
 * the vector count — TODO confirm.  Excerpt elides some lines (rid
 * assignments, error returns, #ifdef IXGBE_FDIR, braces);
 * code unchanged.
 */
2142 ixgbe_allocate_msix(struct adapter *adapter)
2144 device_t dev = adapter->dev;
2145 struct ix_queue *que = adapter->queues;
2146 int error, rid, vector = 0;
2148 for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
2150 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2151 RF_SHAREABLE | RF_ACTIVE);
2152 if (que->res == NULL) {
2153 device_printf(dev,"Unable to allocate"
2154 " bus resource: que interrupt [%d]\n", vector);
2157 /* Set the handler function */
2158 error = bus_setup_intr(dev, que->res,
2159 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2160 ixgbe_msix_que, que, &que->tag);
2163 device_printf(dev, "Failed to register QUE handler");
2166 #if __FreeBSD_version >= 800504
2167 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2170 adapter->que_mask |= (u64)(1 << que->msix);
2172 ** Bind the msix vector, and thus the
2173 ** ring to the corresponding cpu.
2175 if (adapter->num_queues > 1)
2176 bus_bind_intr(dev, que->res, i);
2178 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2179 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2180 taskqueue_thread_enqueue, &que->tq);
2181 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2182 device_get_nameunit(adapter->dev));
/* Final vector: link / "other" causes */
2187 adapter->res = bus_alloc_resource_any(dev,
2188 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2189 if (!adapter->res) {
2190 device_printf(dev,"Unable to allocate"
2191 " bus resource: Link interrupt [%d]\n", rid);
2194 /* Set the link handler function */
2195 error = bus_setup_intr(dev, adapter->res,
2196 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2197 ixgbe_msix_link, adapter, &adapter->tag);
2199 adapter->res = NULL;
2200 device_printf(dev, "Failed to register LINK handler");
2203 #if __FreeBSD_version >= 800504
2204 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2206 adapter->linkvec = vector;
2207 /* Tasklets for Link, SFP and Multispeed Fiber */
2208 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2209 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2210 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2212 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2214 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2215 taskqueue_thread_enqueue, &adapter->tq);
2216 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2217 device_get_nameunit(adapter->dev));
2223 * Setup Either MSI/X or MSI
/*
 * ixgbe_setup_msix - probe and allocate MSI-X (preferred) or MSI:
 * maps the MSI-X table BAR (82598 BAR vs. 82599 at BAR+4), reads
 * the supported message count, picks a queue count (one vector per
 * queue plus one for link; tunable override; capped at 8 when
 * auto-configuring), and allocates the vectors.  Falls back to a
 * single MSI, then to a legacy interrupt.  Returns the number of
 * vectors obtained (apparently; the return statements are elided
 * from this excerpt — TODO confirm).
 * NOTE(review): excerpt elides some lines (goto msi, 'want'
 * computation, returns, braces); code below is left byte-identical.
 */
2226 ixgbe_setup_msix(struct adapter *adapter)
2228 device_t dev = adapter->dev;
2229 int rid, want, queues, msgs;
2231 /* Override by tuneable */
2232 if (ixgbe_enable_msix == 0)
2235 /* First try MSI/X */
2236 rid = PCIR_BAR(MSIX_82598_BAR);
2237 adapter->msix_mem = bus_alloc_resource_any(dev,
2238 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2239 if (!adapter->msix_mem) {
2240 rid += 4; /* 82599 maps in higher BAR */
2241 adapter->msix_mem = bus_alloc_resource_any(dev,
2242 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2244 if (!adapter->msix_mem) {
2245 /* May not be enabled */
2246 device_printf(adapter->dev,
2247 "Unable to map MSIX table \n");
2251 msgs = pci_msix_count(dev);
2252 if (msgs == 0) { /* system has msix disabled */
2253 bus_release_resource(dev, SYS_RES_MEMORY,
2254 rid, adapter->msix_mem);
2255 adapter->msix_mem = NULL;
2259 /* Figure out a reasonable auto config value */
2260 queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2262 if (ixgbe_num_queues != 0)
2263 queues = ixgbe_num_queues;
2264 /* Set max queues to 8 when autoconfiguring */
2265 else if ((ixgbe_num_queues == 0) && (queues > 8))
2269 ** Want one vector (RX/TX pair) per queue
2270 ** plus an additional for Link.
2276 device_printf(adapter->dev,
2277 "MSIX Configuration Problem, "
2278 "%d vectors but %d queues wanted!\n",
2280 return (0); /* Will go to Legacy setup */
2282 if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
2283 device_printf(adapter->dev,
2284 "Using MSIX interrupts with %d vectors\n", msgs);
2285 adapter->num_queues = queues;
/* MSI-X unavailable: try plain MSI, else legacy */
2289 msgs = pci_msi_count(dev);
2290 if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
2291 device_printf(adapter->dev,"Using an MSI interrupt\n");
2293 device_printf(adapter->dev,"Using a Legacy interrupt\n");
/*
 * ixgbe_allocate_pci_resources - map BAR(0) register space, record
 * the bus tag/handle for the shared code, default to a single
 * queue, then let ixgbe_setup_msix() decide the interrupt scheme
 * and vector count (stored in adapter->msix).
 * NOTE(review): excerpt elides some lines (rid declaration/use in
 * the BAR(0) alloc, error return, final return); code unchanged.
 */
2299 ixgbe_allocate_pci_resources(struct adapter *adapter)
2302 device_t dev = adapter->dev;
2305 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2308 if (!(adapter->pci_mem)) {
2309 device_printf(dev,"Unable to allocate bus resource: memory\n");
2313 adapter->osdep.mem_bus_space_tag =
2314 rman_get_bustag(adapter->pci_mem);
2315 adapter->osdep.mem_bus_space_handle =
2316 rman_get_bushandle(adapter->pci_mem);
/* Shared code accesses registers through hw_addr */
2317 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2319 /* Legacy defaults */
2320 adapter->num_queues = 1;
2321 adapter->hw.back = &adapter->osdep;
2324 ** Now setup MSI or MSI/X, should
2325 ** return us the number of supported
2326 ** vectors. (Will be 1 for MSI)
2328 adapter->msix = ixgbe_setup_msix(adapter);
/*
 * ixgbe_free_pci_resources - release everything that
 * ixgbe_allocate_pci_resources / allocate_msix / allocate_legacy
 * acquired: per-queue interrupt handlers and IRQ resources, the
 * link/legacy interrupt, MSI vectors, the MSI-X table BAR, and
 * BAR(0).  Bails out early (via the res == NULL check) when attach
 * failed before interrupts were initialized.
 * NOTE(review): excerpt elides some lines (rid/memrid declarations,
 * que->tag = NULL, 'mem:' label, goto, braces); code unchanged.
 */
2333 ixgbe_free_pci_resources(struct adapter * adapter)
2335 struct ix_queue *que = adapter->queues;
2336 device_t dev = adapter->dev;
/* MSI-X table BAR differs between 82598 and 82599 */
2339 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2340 memrid = PCIR_BAR(MSIX_82598_BAR);
2342 memrid = PCIR_BAR(MSIX_82599_BAR);
2345 ** There is a slight possibility of a failure mode
2346 ** in attach that will result in entering this function
2347 ** before interrupt resources have been initialized, and
2348 ** in that case we do not want to execute the loops below
2349 ** We can detect this reliably by the state of the adapter
2352 if (adapter->res == NULL)
2356 ** Release all msix queue resources:
2358 for (int i = 0; i < adapter->num_queues; i++, que++) {
2359 rid = que->msix + 1;
2360 if (que->tag != NULL) {
2361 bus_teardown_intr(dev, que->res, que->tag);
2364 if (que->res != NULL)
2365 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2369 /* Clean the Legacy or Link interrupt last */
2370 if (adapter->linkvec) /* we are doing MSIX */
2371 rid = adapter->linkvec + 1;
2373 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2375 if (adapter->tag != NULL) {
2376 bus_teardown_intr(dev, adapter->res, adapter->tag);
2377 adapter->tag = NULL;
2379 if (adapter->res != NULL)
2380 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2384 pci_release_msi(dev);
2386 if (adapter->msix_mem != NULL)
2387 bus_release_resource(dev, SYS_RES_MEMORY,
2388 memrid, adapter->msix_mem);
2390 if (adapter->pci_mem != NULL)
2391 bus_release_resource(dev, SYS_RES_MEMORY,
2392 PCIR_BAR(0), adapter->pci_mem);
2397 /*********************************************************************
2399 * Setup networking device structure and register an interface.
2401 **********************************************************************/
/*
 * ixgbe_setup_interface - allocate and initialize the ifnet structure,
 * attach it to the network stack, advertise capabilities (checksum
 * offload, TSO, VLAN, jumbo, LRO) and register the supported media types.
 * NOTE(review): embedded line numbers jump; some statements (e.g. the
 * if_alloc NULL-check branch close) are elided from this listing.
 */
2403 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2405 struct ixgbe_hw *hw = &adapter->hw;
2408 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2410 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2412 device_printf(dev, "can not allocate ifnet structure\n");
2415 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2416 ifp->if_mtu = ETHERMTU;
2417 ifp->if_baudrate = 1000000000;
2418 ifp->if_init = ixgbe_init;
2419 ifp->if_softc = adapter;
2420 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2421 ifp->if_ioctl = ixgbe_ioctl;
2422 ifp->if_start = ixgbe_start;
2423 #if __FreeBSD_version >= 800000
/* Multiqueue transmit entry points (FreeBSD 8+). */
2424 ifp->if_transmit = ixgbe_mq_start;
2425 ifp->if_qflush = ixgbe_qflush;
2427 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
2429 ether_ifattach(ifp, adapter->hw.mac.addr);
2431 adapter->max_frame_size =
2432 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2435 * Tell the upper layer(s) we support long frames.
2437 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2439 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
2440 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2441 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2444 ifp->if_capenable = ifp->if_capabilities;
2446 /* Don't enable LRO by default */
2447 ifp->if_capabilities |= IFCAP_LRO;
2450 ** Don't turn this on by default, if vlans are
2451 ** created on another pseudo device (eg. lagg)
2452 ** then vlan events are not passed thru, breaking
2453 ** operation, but with HW FILTER off it works. If
2454 ** using vlans directly on the ixgbe driver you can
2455 ** enable this and get full hardware tag filtering.
2457 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2460 * Specify the media types supported by this adapter and register
2461 * callbacks to update media and link information
2463 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2464 ixgbe_media_status);
2465 ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
2466 ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
/* 82598AT has a copper PHY: also offer 1000baseT media options. */
2467 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2468 ifmedia_add(&adapter->media,
2469 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2470 ifmedia_add(&adapter->media,
2471 IFM_ETHER | IFM_1000_T, 0, NULL);
2473 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2474 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * ixgbe_config_link - bring up the link. For SFP+ modules the work is
 * deferred to the module/multispeed-fiber taskqueues; for other PHYs
 * the link is checked and set up synchronously via the mac.ops hooks.
 * NOTE(review): embedded line numbers jump; the branch structure around
 * the sfp test is partially elided — code kept verbatim.
 */
2480 ixgbe_config_link(struct adapter *adapter)
2482 struct ixgbe_hw *hw = &adapter->hw;
2483 u32 autoneg, err = 0;
2484 bool sfp, negotiate;
2486 sfp = ixgbe_is_sfp(hw);
2489 if (hw->phy.multispeed_fiber) {
2490 hw->mac.ops.setup_sfp(hw);
2491 ixgbe_enable_tx_laser(hw);
/* Multispeed fiber: let the MSF task negotiate the speed. */
2492 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2494 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2496 if (hw->mac.ops.check_link)
2497 err = ixgbe_check_link(hw, &autoneg,
2498 &adapter->link_up, FALSE);
2501 autoneg = hw->phy.autoneg_advertised;
/* If nothing is advertised, fall back to the PHY capabilities. */
2502 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2503 err = hw->mac.ops.get_link_capabilities(hw,
2504 &autoneg, &negotiate);
2507 if (hw->mac.ops.setup_link)
2508 err = hw->mac.ops.setup_link(hw, autoneg,
2509 negotiate, adapter->link_up);
2515 /********************************************************************
2516 * Manage DMA'able memory.
2517 *******************************************************************/
/*
 * ixgbe_dmamap_cb - bus_dmamap_load callback: stores the single mapped
 * segment's bus address into the caller-supplied bus_addr_t.
 */
2519 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
2523 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * ixgbe_dma_malloc - allocate a DBA_ALIGN-aligned DMA-able memory region
 * of 'size' bytes for descriptor rings: creates a tag, allocates the
 * memory, and loads the map, unwinding on each failure.
 * NOTE(review): embedded line numbers jump; the nsegments argument and the
 * error-unwind labels are elided from this listing — code kept verbatim.
 */
2528 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2529 struct ixgbe_dma_alloc *dma, int mapflags)
2531 device_t dev = adapter->dev;
2534 r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2535 DBA_ALIGN, 0, /* alignment, bounds */
2536 BUS_SPACE_MAXADDR, /* lowaddr */
2537 BUS_SPACE_MAXADDR, /* highaddr */
2538 NULL, NULL, /* filter, filterarg */
2541 size, /* maxsegsize */
2542 BUS_DMA_ALLOCNOW, /* flags */
2543 NULL, /* lockfunc */
2544 NULL, /* lockfuncarg */
2547 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2551 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2552 BUS_DMA_NOWAIT, &dma->dma_map);
2554 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2558 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2562 mapflags | BUS_DMA_NOWAIT);
2564 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2568 dma->dma_size = size;
/* Error unwind: free memory, destroy tag, clear handles. */
2571 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2573 bus_dma_tag_destroy(dma->dma_tag);
2575 dma->dma_map = NULL;
2576 dma->dma_tag = NULL;
/*
 * ixgbe_dma_free - release a region allocated by ixgbe_dma_malloc:
 * sync, unload, free the memory and destroy the tag, in that order.
 */
2581 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2583 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2584 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2585 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2586 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2587 bus_dma_tag_destroy(dma->dma_tag);
2591 /*********************************************************************
2593 * Allocate memory for the transmit and receive rings, and then
2594 * the descriptors associated with each, called only once at attach.
2596 **********************************************************************/
/*
 * ixgbe_allocate_queues - one-time attach allocation of the queue, TX
 * ring and RX ring arrays, their descriptor DMA areas, mbuf buffers and
 * (FreeBSD 8+) per-TX-ring buf_rings. txconf/rxconf count how many rings
 * completed so the fail path can unwind exactly what was built.
 * NOTE(review): embedded line numbers jump; goto labels for the fail
 * paths and some closing braces are elided — code kept verbatim.
 */
2598 ixgbe_allocate_queues(struct adapter *adapter)
2600 device_t dev = adapter->dev;
2601 struct ix_queue *que;
2602 struct tx_ring *txr;
2603 struct rx_ring *rxr;
2604 int rsize, tsize, error = IXGBE_SUCCESS;
2605 int txconf = 0, rxconf = 0;
2607 /* First allocate the top level queue structs */
2608 if (!(adapter->queues =
2609 (struct ix_queue *) malloc(sizeof(struct ix_queue) *
2610 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2611 device_printf(dev, "Unable to allocate queue memory\n");
2616 /* First allocate the TX ring struct memory */
2617 if (!(adapter->tx_rings =
2618 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2619 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2620 device_printf(dev, "Unable to allocate TX ring memory\n");
2625 /* Next allocate the RX */
2626 if (!(adapter->rx_rings =
2627 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2628 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2629 device_printf(dev, "Unable to allocate RX ring memory\n");
2634 /* For the ring itself */
2635 tsize = roundup2(adapter->num_tx_desc *
2636 sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2639 * Now set up the TX queues, txconf is needed to handle the
2640 * possibility that things fail midcourse and we need to
2641 * undo memory gracefully
2643 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2644 /* Set up some basics */
2645 txr = &adapter->tx_rings[i];
2646 txr->adapter = adapter;
2649 /* Initialize the TX side lock */
2650 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2651 device_get_nameunit(dev), txr->me);
2652 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2654 if (ixgbe_dma_malloc(adapter, tsize,
2655 &txr->txdma, BUS_DMA_NOWAIT)) {
2657 "Unable to allocate TX Descriptor memory\n");
2661 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2662 bzero((void *)txr->tx_base, tsize);
2664 /* Now allocate transmit buffers for the ring */
2665 if (ixgbe_allocate_transmit_buffers(txr)) {
2667 "Critical Failure setting up transmit buffers\n");
2671 #if __FreeBSD_version >= 800000
2672 /* Allocate a buf ring */
2673 txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2674 M_WAITOK, &txr->tx_mtx);
2675 if (txr->br == NULL) {
2677 "Critical Failure setting up buf ring\n");
2685 * Next the RX queues...
2687 rsize = roundup2(adapter->num_rx_desc *
2688 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2689 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2690 rxr = &adapter->rx_rings[i];
2691 /* Set up some basics */
2692 rxr->adapter = adapter;
2695 /* Initialize the RX side lock */
2696 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2697 device_get_nameunit(dev), rxr->me);
2698 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2700 if (ixgbe_dma_malloc(adapter, rsize,
2701 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2703 "Unable to allocate RxDescriptor memory\n");
2707 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2708 bzero((void *)rxr->rx_base, rsize);
2710 /* Allocate receive buffers for the ring*/
2711 if (ixgbe_allocate_receive_buffers(rxr)) {
2713 "Critical Failure setting up receive buffers\n");
2720 ** Finally set up the queue holding structs
2722 for (int i = 0; i < adapter->num_queues; i++) {
2723 que = &adapter->queues[i];
2724 que->adapter = adapter;
2725 que->txr = &adapter->tx_rings[i];
2726 que->rxr = &adapter->rx_rings[i];
/* Fail path: free only the rings that were fully set up. */
2732 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2733 ixgbe_dma_free(adapter, &rxr->rxdma);
2735 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2736 ixgbe_dma_free(adapter, &txr->txdma);
2737 free(adapter->rx_rings, M_DEVBUF);
2739 free(adapter->tx_rings, M_DEVBUF);
2741 free(adapter->queues, M_DEVBUF);
2746 /*********************************************************************
2748 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2749 * the information needed to transmit a packet on the wire. This is
2750 * called only once at attach, setup is done every reset.
2752 **********************************************************************/
/*
 * ixgbe_allocate_transmit_buffers - one-time allocation of the
 * tx_buffer array for a ring plus the DMA tag and per-descriptor maps
 * used to map outgoing mbuf chains. On failure, frees everything via
 * ixgbe_free_transmit_structures (handles partial completion).
 * NOTE(review): embedded line numbers jump; flags argument to
 * bus_dma_tag_create and fail labels are elided — code kept verbatim.
 */
2754 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2756 struct adapter *adapter = txr->adapter;
2757 device_t dev = adapter->dev;
2758 struct ixgbe_tx_buf *txbuf;
2762 * Setup DMA descriptor areas.
2764 if ((error = bus_dma_tag_create(NULL, /* parent */
2765 1, 0, /* alignment, bounds */
2766 BUS_SPACE_MAXADDR, /* lowaddr */
2767 BUS_SPACE_MAXADDR, /* highaddr */
2768 NULL, NULL, /* filter, filterarg */
2769 IXGBE_TSO_SIZE, /* maxsize */
2770 adapter->num_segs, /* nsegments */
2771 PAGE_SIZE, /* maxsegsize */
2773 NULL, /* lockfunc */
2774 NULL, /* lockfuncarg */
2776 device_printf(dev,"Unable to allocate TX DMA tag\n");
2780 if (!(txr->tx_buffers =
2781 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
2782 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2783 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2788 /* Create the descriptor buffer dma maps */
2789 txbuf = txr->tx_buffers;
2790 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2791 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2793 device_printf(dev, "Unable to create TX DMA map\n");
2800 /* We free all, it handles case where we are in the middle */
2801 ixgbe_free_transmit_structures(adapter);
2805 /*********************************************************************
2807 * Initialize a transmit ring.
2809 **********************************************************************/
/*
 * ixgbe_setup_transmit_ring - (re)initialize one TX ring: zero the
 * descriptor area, reset head/tail indices, free any mbufs left over
 * from a previous run, and sync the descriptor DMA map. Called on every
 * reset. The matching IXGBE_TX_LOCK is elided from this listing
 * (the UNLOCK at the end is visible) — code kept verbatim.
 */
2811 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2813 struct adapter *adapter = txr->adapter;
2814 struct ixgbe_tx_buf *txbuf;
2817 /* Clear the old ring contents */
2819 bzero((void *)txr->tx_base,
2820 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc;
2822 txr->next_avail_desc = 0;
2823 txr->next_to_clean = 0;
2825 /* Free any existing tx buffers. */
2826 txbuf = txr->tx_buffers;
2827 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2828 if (txbuf->m_head != NULL) {
2829 bus_dmamap_sync(txr->txtag, txbuf->map,
2830 BUS_DMASYNC_POSTWRITE);
2831 bus_dmamap_unload(txr->txtag, txbuf->map);
2832 m_freem(txbuf->m_head);
2833 txbuf->m_head = NULL;
2835 /* Clear the EOP index */
2836 txbuf->eop_index = -1;
2840 /* Set the rate at which we sample packets */
/* Flow Director (ATR) sampling applies to 82599-class MACs only. */
2841 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2842 txr->atr_sample = atr_sample_rate;
2845 /* Set number of descriptors available */
2846 txr->tx_avail = adapter->num_tx_desc;
2848 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2849 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2850 IXGBE_TX_UNLOCK(txr);
2853 /*********************************************************************
2855 * Initialize all transmit rings.
2857 **********************************************************************/
/*
 * ixgbe_setup_transmit_structures - initialize every TX ring by calling
 * ixgbe_setup_transmit_ring() on each in turn.
 */
2859 ixgbe_setup_transmit_structures(struct adapter *adapter)
2861 struct tx_ring *txr = adapter->tx_rings;
2863 for (int i = 0; i < adapter->num_queues; i++, txr++)
2864 ixgbe_setup_transmit_ring(txr);
2869 /*********************************************************************
2871 * Enable transmit unit.
2873 **********************************************************************/
/*
 * ixgbe_initialize_transmit_units - program the hardware TX side: for
 * each ring write the descriptor base/length and head/tail registers,
 * and disable head writeback; then on 82599-class parts enable DMA TX
 * and set MTQC (arbiter briefly disabled as required).
 * NOTE(review): embedded line numbers jump; break statements and default
 * cases in the switches are elided — code kept verbatim.
 */
2875 ixgbe_initialize_transmit_units(struct adapter *adapter)
2877 struct tx_ring *txr = adapter->tx_rings;
2878 struct ixgbe_hw *hw = &adapter->hw;
2880 /* Setup the Base and Length of the Tx Descriptor Ring */
2882 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2883 u64 tdba = txr->txdma.dma_paddr;
2886 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2887 (tdba & 0x00000000ffffffffULL));
2888 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2889 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2890 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2892 /* Setup the HW Tx Head and Tail descriptor pointers */
2893 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2894 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2896 /* Setup Transmit Descriptor Cmd Settings */
2897 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2898 txr->queue_status = IXGBE_QUEUE_IDLE;
2900 /* Disable Head Writeback */
/* DCA_TXCTRL register offset differs between 82598 and 82599. */
2901 switch (hw->mac.type) {
2902 case ixgbe_mac_82598EB:
2903 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2905 case ixgbe_mac_82599EB:
2907 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2910 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2911 switch (hw->mac.type) {
2912 case ixgbe_mac_82598EB:
2913 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2915 case ixgbe_mac_82599EB:
2917 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2923 if (hw->mac.type != ixgbe_mac_82598EB) {
2924 u32 dmatxctl, rttdcs;
2925 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2926 dmatxctl |= IXGBE_DMATXCTL_TE;
2927 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2928 /* Disable arbiter to set MTQC */
2929 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2930 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2931 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2932 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2933 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2934 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2940 /*********************************************************************
2942 * Free all transmit rings.
2944 **********************************************************************/
/*
 * ixgbe_free_transmit_structures - free every TX ring's buffers and
 * descriptor DMA area, destroy the per-ring locks, then free the ring
 * array itself. The per-ring LOCK call is elided from this listing.
 */
2946 ixgbe_free_transmit_structures(struct adapter *adapter)
2948 struct tx_ring *txr = adapter->tx_rings;
2950 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2952 ixgbe_free_transmit_buffers(txr);
2953 ixgbe_dma_free(adapter, &txr->txdma);
2954 IXGBE_TX_UNLOCK(txr);
2955 IXGBE_TX_LOCK_DESTROY(txr);
2957 free(adapter->tx_rings, M_DEVBUF);
2960 /*********************************************************************
2962 * Free transmit ring related data structures.
2964 **********************************************************************/
/*
 * ixgbe_free_transmit_buffers - release everything attached to a TX
 * ring's buffer array: unload/destroy each DMA map, free any pending
 * mbufs, the (FreeBSD 8+) buf_ring, the tx_buffers array and the tag.
 * NOTE(review): embedded line numbers jump; some map arguments in the
 * wrapped bus_dmamap_* calls are elided — code kept verbatim.
 */
2966 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2968 struct adapter *adapter = txr->adapter;
2969 struct ixgbe_tx_buf *tx_buffer;
2972 INIT_DEBUGOUT("free_transmit_ring: begin");
2974 if (txr->tx_buffers == NULL)
2977 tx_buffer = txr->tx_buffers;
2978 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2979 if (tx_buffer->m_head != NULL) {
2980 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2981 BUS_DMASYNC_POSTWRITE);
2982 bus_dmamap_unload(txr->txtag,
2984 m_freem(tx_buffer->m_head);
2985 tx_buffer->m_head = NULL;
2986 if (tx_buffer->map != NULL) {
2987 bus_dmamap_destroy(txr->txtag,
2989 tx_buffer->map = NULL;
/* No mbuf pending: still unload/destroy any created map. */
2991 } else if (tx_buffer->map != NULL) {
2992 bus_dmamap_unload(txr->txtag,
2994 bus_dmamap_destroy(txr->txtag,
2996 tx_buffer->map = NULL;
2999 #if __FreeBSD_version >= 800000
3000 if (txr->br != NULL)
3001 buf_ring_free(txr->br, M_DEVBUF);
3003 if (txr->tx_buffers != NULL) {
3004 free(txr->tx_buffers, M_DEVBUF);
3005 txr->tx_buffers = NULL;
3007 if (txr->txtag != NULL) {
3008 bus_dma_tag_destroy(txr->txtag);
3014 /*********************************************************************
3016 * Advanced Context Descriptor setup for VLAN or CSUM
3018 **********************************************************************/
/*
 * ixgbe_tx_ctx_setup - build an advanced context descriptor for VLAN
 * tagging and/or checksum offload of one outgoing packet: parses the
 * Ethernet/VLAN header, selects IPv4/IPv6 and TCP/UDP/SCTP TUCMD bits,
 * writes the descriptor, and consumes one slot in the ring.
 * NOTE(review): embedded line numbers jump; early returns, the switch
 * on etype/ipproto and some break statements are elided — kept verbatim.
 */
3021 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
3023 struct adapter *adapter = txr->adapter;
3024 struct ixgbe_adv_tx_context_desc *TXD;
3025 struct ixgbe_tx_buf *tx_buffer;
3026 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3027 struct ether_vlan_header *eh;
3029 struct ip6_hdr *ip6;
3030 int ehdrlen, ip_hlen = 0;
3033 bool offload = TRUE;
3034 int ctxd = txr->next_avail_desc;
/* No checksum offload requested for this packet. */
3038 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
3041 tx_buffer = &txr->tx_buffers[ctxd];
3042 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3045 ** In advanced descriptors the vlan tag must
3046 ** be placed into the descriptor itself.
3048 if (mp->m_flags & M_VLANTAG) {
3049 vtag = htole16(mp->m_pkthdr.ether_vtag);
3050 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3051 } else if (offload == FALSE)
3055 * Determine where frame payload starts.
3056 * Jump over vlan headers if already present,
3057 * helpful for QinQ too.
3059 eh = mtod(mp, struct ether_vlan_header *);
3060 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3061 etype = ntohs(eh->evl_proto);
3062 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3064 etype = ntohs(eh->evl_encap_proto);
3065 ehdrlen = ETHER_HDR_LEN;
3068 /* Set the ether header length */
3069 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3073 ip = (struct ip *)(mp->m_data + ehdrlen);
3074 ip_hlen = ip->ip_hl << 2;
3076 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3078 case ETHERTYPE_IPV6:
3079 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3080 ip_hlen = sizeof(struct ip6_hdr);
3081 ipproto = ip6->ip6_nxt;
3082 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
3089 vlan_macip_lens |= ip_hlen;
3090 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3094 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
3095 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3099 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
3100 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
3103 #if __FreeBSD_version >= 800000
3105 if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
3106 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3114 /* Now copy bits into descriptor */
3115 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3116 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3117 TXD->seqnum_seed = htole32(0);
3118 TXD->mss_l4len_idx = htole32(0);
3120 tx_buffer->m_head = NULL;
3121 tx_buffer->eop_index = -1;
3123 /* We've consumed the first desc, adjust counters */
3124 if (++ctxd == adapter->num_tx_desc)
3126 txr->next_avail_desc = ctxd;
3132 /**********************************************************************
3134 * Setup work for hardware segmentation offload (TSO) on
3135 * adapters using advanced tx descriptors
3137 **********************************************************************/
/*
 * ixgbe_tso_setup - build the TSO context descriptor for an IPv4/TCP
 * packet: computes header lengths, seeds the TCP pseudo-header checksum,
 * reports the payload length via *paylen, and fills in MSS/L4LEN.
 * Returns FALSE for non-TCP packets or when headers aren't contiguous.
 * NOTE(review): embedded line numbers jump; the declarations block and
 * some wrap-around handling are elided — code kept verbatim.
 */
3139 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
3141 struct adapter *adapter = txr->adapter;
3142 struct ixgbe_adv_tx_context_desc *TXD;
3143 struct ixgbe_tx_buf *tx_buffer;
3144 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3145 u32 mss_l4len_idx = 0;
3147 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
3148 struct ether_vlan_header *eh;
3154 * Determine where frame payload starts.
3155 * Jump over vlan headers if already present
3157 eh = mtod(mp, struct ether_vlan_header *);
3158 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
3159 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3161 ehdrlen = ETHER_HDR_LEN;
3163 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3164 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3167 ctxd = txr->next_avail_desc;
3168 tx_buffer = &txr->tx_buffers[ctxd];
3169 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3171 ip = (struct ip *)(mp->m_data + ehdrlen);
3172 if (ip->ip_p != IPPROTO_TCP)
3173 return FALSE; /* 0 */
3175 ip_hlen = ip->ip_hl << 2;
3176 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
/* Pre-compute pseudo-header checksum; hardware fills in the rest. */
3177 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3178 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3179 tcp_hlen = th->th_off << 2;
3180 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
3182 /* This is used in the transmit desc in encap */
3183 *paylen = mp->m_pkthdr.len - hdrlen;
3185 /* VLAN MACLEN IPLEN */
3186 if (mp->m_flags & M_VLANTAG) {
3187 vtag = htole16(mp->m_pkthdr.ether_vtag);
3188 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3191 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3192 vlan_macip_lens |= ip_hlen;
3193 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3195 /* ADV DTYPE TUCMD */
3196 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3197 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3198 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3199 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3203 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
3204 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
3205 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3207 TXD->seqnum_seed = htole32(0);
3208 tx_buffer->m_head = NULL;
3209 tx_buffer->eop_index = -1;
3211 if (++ctxd == adapter->num_tx_desc)
3215 txr->next_avail_desc = ctxd;
3221 ** This routine parses packet headers so that Flow
3222 ** Director can make a hashed filter table entry
3223 ** allowing traffic flows to be identified and kept
3224 ** on the same cpu. This would be a performance
3225 ** hit, but we only do it at IXGBE_FDIR_RATE of
/*
 * ixgbe_atr - Flow Director (ATR) sampling: parse the IPv4 TCP/UDP
 * headers of an outgoing packet, fold them into the hash input/common
 * dwords, and install a signature filter steering the return flow to
 * this queue's MSI-X vector (assumes RX/TX queues share a CPU).
 * NOTE(review): embedded line numbers jump; the switch on ip->ip_p and
 * early returns are elided — code kept verbatim. IXGBE_FDIR-only code.
 */
3229 ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
3231 struct adapter *adapter = txr->adapter;
3232 struct ix_queue *que;
3236 struct ether_vlan_header *eh;
3237 union ixgbe_atr_hash_dword input = {.dword = 0};
3238 union ixgbe_atr_hash_dword common = {.dword = 0};
3239 int ehdrlen, ip_hlen;
3242 eh = mtod(mp, struct ether_vlan_header *);
3243 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3244 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3245 etype = eh->evl_proto;
3247 ehdrlen = ETHER_HDR_LEN;
3248 etype = eh->evl_encap_proto;
3251 /* Only handling IPv4 */
3252 if (etype != htons(ETHERTYPE_IP))
3255 ip = (struct ip *)(mp->m_data + ehdrlen);
3256 ip_hlen = ip->ip_hl << 2;
3258 /* check if we're UDP or TCP */
3261 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
3262 /* src and dst are inverted */
3263 common.port.dst ^= th->th_sport;
3264 common.port.src ^= th->th_dport;
3265 input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
3268 uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
3269 /* src and dst are inverted */
3270 common.port.dst ^= uh->uh_sport;
3271 common.port.src ^= uh->uh_dport;
3272 input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
3278 input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
3279 if (mp->m_pkthdr.ether_vtag)
3280 common.flex_bytes ^= htons(ETHERTYPE_VLAN);
3282 common.flex_bytes ^= etype;
3283 common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
3285 que = &adapter->queues[txr->me];
3287 ** This assumes the Rx queue and Tx
3288 ** queue are bound to the same CPU
3290 ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
3291 input, common, que->msix);
3293 #endif /* IXGBE_FDIR */
3295 /**********************************************************************
3297 * Examine each tx_buffer in the used queue. If the hardware is done
3298 * processing the packet then free associated resources. The
3299 * tx_buffer is put back on the free queue.
3301 **********************************************************************/
/*
 * ixgbe_txeof - TX completion cleanup. Walks descriptors from
 * next_to_clean, and for each packet whose EOP descriptor has DD set,
 * frees the mbufs/maps and returns the slots to tx_avail. Also drives
 * the watchdog: marks the queue HUNG if nothing completes within
 * IXGBE_WATCHDOG ticks, and clears IFF_DRV_OACTIVE when room frees up.
 * Caller must hold the TX lock (asserted below).
 * NOTE(review): embedded line numbers jump; early returns, counter
 * increments (processed/tx_avail) and brace closes are elided — verbatim.
 */
3303 ixgbe_txeof(struct tx_ring *txr)
3305 struct adapter *adapter = txr->adapter;
3306 struct ifnet *ifp = adapter->ifp;
3307 u32 first, last, done, processed;
3308 struct ixgbe_tx_buf *tx_buffer;
3309 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
3311 mtx_assert(&txr->tx_mtx, MA_OWNED);
/* Ring already empty: nothing to clean. */
3313 if (txr->tx_avail == adapter->num_tx_desc) {
3314 txr->queue_status = IXGBE_QUEUE_IDLE;
3319 first = txr->next_to_clean;
3320 tx_buffer = &txr->tx_buffers[first];
3321 /* For cleanup we just use legacy struct */
3322 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3323 last = tx_buffer->eop_index;
3326 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3329 ** Get the index of the first descriptor
3330 ** BEYOND the EOP and call that 'done'.
3331 ** I do this so the comparison in the
3332 ** inner while loop below can be simple
3334 if (++last == adapter->num_tx_desc) last = 0;
3337 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3338 BUS_DMASYNC_POSTREAD);
3340 ** Only the EOP descriptor of a packet now has the DD
3341 ** bit set, this is what we look for...
3343 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
3344 /* We clean the range of the packet */
3345 while (first != done) {
3346 tx_desc->upper.data = 0;
3347 tx_desc->lower.data = 0;
3348 tx_desc->buffer_addr = 0;
3352 if (tx_buffer->m_head) {
3354 tx_buffer->m_head->m_pkthdr.len;
3355 bus_dmamap_sync(txr->txtag,
3357 BUS_DMASYNC_POSTWRITE);
3358 bus_dmamap_unload(txr->txtag,
3360 m_freem(tx_buffer->m_head);
3361 tx_buffer->m_head = NULL;
3362 tx_buffer->map = NULL;
3364 tx_buffer->eop_index = -1;
/* Progress made: reset the watchdog timestamp. */
3365 txr->watchdog_time = ticks;
3367 if (++first == adapter->num_tx_desc)
3370 tx_buffer = &txr->tx_buffers[first];
3372 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3376 /* See if there is more work now */
3377 last = tx_buffer->eop_index;
3380 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3381 /* Get next done point */
3382 if (++last == adapter->num_tx_desc) last = 0;
3387 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3388 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3390 txr->next_to_clean = first;
3393 ** Watchdog calculation, we know there's
3394 ** work outstanding or the first return
3395 ** would have been taken, so none processed
3396 ** for too long indicates a hang.
3398 if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
3399 txr->queue_status = IXGBE_QUEUE_HUNG;
3402 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
3403 * it is OK to send packets. If there are no pending descriptors,
3404 * clear the timeout. Otherwise, if some descriptors have been freed,
3405 * restart the timeout.
3407 if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
3408 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3409 if (txr->tx_avail == adapter->num_tx_desc) {
3410 txr->queue_status = IXGBE_QUEUE_IDLE;
3418 /*********************************************************************
3420 * Refresh mbuf buffers for RX descriptor rings
3421 * - now keeps its own state so discards due to resource
3422 * exhaustion are unnecessary, if an mbuf cannot be obtained
3423 * it just returns, keeping its placeholder, thus it can simply
3424 * be recalled to try again.
3426 **********************************************************************/
/*
 * ixgbe_refresh_mbufs - replenish RX descriptors between
 * next_to_refresh and 'limit' with fresh header (hdr_split) and payload
 * mbufs, DMA-map them, and bump the hardware tail (RDT) if anything was
 * refreshed. Keeps its own position, so on mbuf shortage it simply
 * returns and can be called again without discarding packets.
 * NOTE(review): embedded line numbers jump; failure gotos ("update"
 * label), brace closes and the i-advance statement are elided — verbatim.
 */
3428 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
3430 struct adapter *adapter = rxr->adapter;
3431 bus_dma_segment_t hseg[1];
3432 bus_dma_segment_t pseg[1];
3433 struct ixgbe_rx_buf *rxbuf;
3434 struct mbuf *mh, *mp;
3435 int i, j, nsegs, error;
3436 bool refreshed = FALSE;
3438 i = j = rxr->next_to_refresh;
3439 /* Control the loop with one beyond */
3440 if (++j == adapter->num_rx_desc)
3443 while (j != limit) {
3444 rxbuf = &rxr->rx_buffers[i];
/* Header mbuf is only needed when header split is enabled. */
3445 if (rxr->hdr_split == FALSE)
3448 if (rxbuf->m_head == NULL) {
3449 mh = m_gethdr(M_DONTWAIT, MT_DATA);
3455 mh->m_pkthdr.len = mh->m_len = MHLEN;
3457 mh->m_flags |= M_PKTHDR;
3458 /* Get the memory mapping */
3459 error = bus_dmamap_load_mbuf_sg(rxr->htag,
3460 rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
3462 printf("Refresh mbufs: hdr dmamap load"
3463 " failure - %d\n", error);
3465 rxbuf->m_head = NULL;
3469 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3470 BUS_DMASYNC_PREREAD);
3471 rxr->rx_base[i].read.hdr_addr =
3472 htole64(hseg[0].ds_addr);
3475 if (rxbuf->m_pack == NULL) {
3476 mp = m_getjcl(M_DONTWAIT, MT_DATA,
3477 M_PKTHDR, adapter->rx_mbuf_sz);
3483 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
3484 /* Get the memory mapping */
3485 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
3486 rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
3488 printf("Refresh mbufs: payload dmamap load"
3489 " failure - %d\n", error);
3491 rxbuf->m_pack = NULL;
3495 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3496 BUS_DMASYNC_PREREAD);
3497 rxr->rx_base[i].read.pkt_addr =
3498 htole64(pseg[0].ds_addr);
3501 /* Next is precalculated */
3503 rxr->next_to_refresh = i;
3504 if (++j == adapter->num_rx_desc)
3508 if (refreshed) /* Update hardware tail index */
3509 IXGBE_WRITE_REG(&adapter->hw,
3510 IXGBE_RDT(rxr->me), rxr->next_to_refresh);
3514 /*********************************************************************
3516 * Allocate memory for rx_buffer structures. Since we use one
3517 * rx_buffer per received packet, the maximum number of rx_buffer's
3518 * that we'll need is equal to the number of receive descriptors
3519 * that we've allocated.
3521 **********************************************************************/
/*
 * ixgbe_allocate_receive_buffers - one-time attach allocation of a
 * ring's rx_buffer array plus two DMA tags: a small (MSIZE) header tag
 * and a large (MJUM16BYTES) payload tag, with per-descriptor maps for
 * each. On failure everything is freed via
 * ixgbe_free_receive_structures (handles partial completion).
 * NOTE(review): embedded line numbers jump; nsegments arguments, fail
 * labels and brace closes are elided — code kept verbatim.
 */
3523 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
3525 struct adapter *adapter = rxr->adapter;
3526 device_t dev = adapter->dev;
3527 struct ixgbe_rx_buf *rxbuf;
3528 int i, bsize, error;
3530 bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
3531 if (!(rxr->rx_buffers =
3532 (struct ixgbe_rx_buf *) malloc(bsize,
3533 M_DEVBUF, M_NOWAIT | M_ZERO))) {
3534 device_printf(dev, "Unable to allocate rx_buffer memory\n");
/* Header tag: mbuf-sized buffers for split packet headers. */
3539 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3540 1, 0, /* alignment, bounds */
3541 BUS_SPACE_MAXADDR, /* lowaddr */
3542 BUS_SPACE_MAXADDR, /* highaddr */
3543 NULL, NULL, /* filter, filterarg */
3544 MSIZE, /* maxsize */
3546 MSIZE, /* maxsegsize */
3548 NULL, /* lockfunc */
3549 NULL, /* lockfuncarg */
3551 device_printf(dev, "Unable to create RX DMA tag\n");
/* Payload tag: up to 16KB jumbo clusters. */
3555 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3556 1, 0, /* alignment, bounds */
3557 BUS_SPACE_MAXADDR, /* lowaddr */
3558 BUS_SPACE_MAXADDR, /* highaddr */
3559 NULL, NULL, /* filter, filterarg */
3560 MJUM16BYTES, /* maxsize */
3562 MJUM16BYTES, /* maxsegsize */
3564 NULL, /* lockfunc */
3565 NULL, /* lockfuncarg */
3567 device_printf(dev, "Unable to create RX DMA tag\n");
3571 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
3572 rxbuf = &rxr->rx_buffers[i];
3573 error = bus_dmamap_create(rxr->htag,
3574 BUS_DMA_NOWAIT, &rxbuf->hmap);
3576 device_printf(dev, "Unable to create RX head map\n");
3579 error = bus_dmamap_create(rxr->ptag,
3580 BUS_DMA_NOWAIT, &rxbuf->pmap);
3582 device_printf(dev, "Unable to create RX pkt map\n");
3590 /* Frees all, but can handle partial completion */
3591 ixgbe_free_receive_structures(adapter);
3596 ** Used to detect a descriptor that has
3597 ** been merged by Hardware RSC.
/*
 * ixgbe_rsc_count - extract the RSC coalesced-descriptor count from a
 * writeback RX descriptor; nonzero means hardware RSC merged frames.
 */
3600 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
3602 return (le32toh(rx->wb.lower.lo_dword.data) &
3603 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
3606 /*********************************************************************
3608 * Initialize Hardware RSC (LRO) feature on 82599
3609 * for an RX ring, this is toggled by the LRO capability
3610 * even though it is transparent to the stack.
3612 **********************************************************************/
/*
 * ixgbe_setup_hw_rsc - enable hardware Receive Side Coalescing (LRO)
 * for one RX ring on 82599: program RDRXCTL/RSCCTL, cap the number of
 * merged descriptors by cluster size (total must stay under 64K),
 * enable TCP header recognition and disable RSC for pure ACKs.
 */
3614 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
3616 struct adapter *adapter = rxr->adapter;
3617 struct ixgbe_hw *hw = &adapter->hw;
3618 u32 rscctrl, rdrxctl;
3620 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3621 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3622 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3623 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
3624 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3626 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
3627 rscctrl |= IXGBE_RSCCTL_RSCEN;
3629 ** Limit the total number of descriptors that
3630 ** can be combined, so it does not exceed 64K
3632 if (adapter->rx_mbuf_sz == MCLBYTES)
3633 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3634 else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
3635 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
3636 else if (adapter->rx_mbuf_sz == MJUM9BYTES)
3637 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
3638 else /* Using 16K cluster */
3639 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
3641 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
3643 /* Enable TCP header recognition */
3644 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
3645 (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
3646 IXGBE_PSRTYPE_TCPHDR));
3648 /* Disable RSC for ACK packets */
3649 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3650 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3657 ixgbe_free_receive_ring(struct rx_ring *rxr)
3659 struct adapter *adapter;
3660 struct ixgbe_rx_buf *rxbuf;
3663 adapter = rxr->adapter;
3664 for (i = 0; i < adapter->num_rx_desc; i++) {
3665 rxbuf = &rxr->rx_buffers[i];
3666 if (rxbuf->m_head != NULL) {
3667 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3668 BUS_DMASYNC_POSTREAD);
3669 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3670 rxbuf->m_head->m_flags |= M_PKTHDR;
3671 m_freem(rxbuf->m_head);
3673 if (rxbuf->m_pack != NULL) {
3674 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3675 BUS_DMASYNC_POSTREAD);
3676 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3677 rxbuf->m_pack->m_flags |= M_PKTHDR;
3678 m_freem(rxbuf->m_pack);
3680 rxbuf->m_head = NULL;
3681 rxbuf->m_pack = NULL;
3686 /*********************************************************************
3688 * Initialize a receive ring and its buffers.
3690 **********************************************************************/
3692 ixgbe_setup_receive_ring(struct rx_ring *rxr)
3694 struct adapter *adapter;
3697 struct ixgbe_rx_buf *rxbuf;
3698 bus_dma_segment_t pseg[1], hseg[1];
3699 struct lro_ctrl *lro = &rxr->lro;
3700 int rsize, nsegs, error = 0;
3702 adapter = rxr->adapter;
3706 /* Clear the ring contents */
3708 rsize = roundup2(adapter->num_rx_desc *
3709 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
3710 bzero((void *)rxr->rx_base, rsize);
3712 /* Free current RX buffer structs and their mbufs */
3713 ixgbe_free_receive_ring(rxr);
3715 /* Configure header split? */
3716 if (ixgbe_header_split)
3717 rxr->hdr_split = TRUE;
3719 /* Now replenish the mbufs */
3720 for (int j = 0; j != adapter->num_rx_desc; ++j) {
3721 struct mbuf *mh, *mp;
3723 rxbuf = &rxr->rx_buffers[j];
3725 ** Don't allocate mbufs if not
3726 ** doing header split, its wasteful
3728 if (rxr->hdr_split == FALSE)
3731 /* First the header */
3732 rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
3733 if (rxbuf->m_head == NULL) {
3737 m_adj(rxbuf->m_head, ETHER_ALIGN);
3739 mh->m_len = mh->m_pkthdr.len = MHLEN;
3740 mh->m_flags |= M_PKTHDR;
3741 /* Get the memory mapping */
3742 error = bus_dmamap_load_mbuf_sg(rxr->htag,
3743 rxbuf->hmap, rxbuf->m_head, hseg,
3744 &nsegs, BUS_DMA_NOWAIT);
3745 if (error != 0) /* Nothing elegant to do here */
3747 bus_dmamap_sync(rxr->htag,
3748 rxbuf->hmap, BUS_DMASYNC_PREREAD);
3749 /* Update descriptor */
3750 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
3753 /* Now the payload cluster */
3754 rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
3755 M_PKTHDR, adapter->rx_mbuf_sz);
3756 if (rxbuf->m_pack == NULL) {
3761 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
3762 /* Get the memory mapping */
3763 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
3764 rxbuf->pmap, mp, pseg,
3765 &nsegs, BUS_DMA_NOWAIT);
3768 bus_dmamap_sync(rxr->ptag,
3769 rxbuf->pmap, BUS_DMASYNC_PREREAD);
3770 /* Update descriptor */
3771 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
3775 /* Setup our descriptor indices */
3776 rxr->next_to_check = 0;
3777 rxr->next_to_refresh = 0;
3778 rxr->lro_enabled = FALSE;
3779 rxr->rx_split_packets = 0;
3781 rxr->discard = FALSE;
3783 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3784 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3787 ** Now set up the LRO interface:
3788 ** 82598 uses software LRO, the
3789 ** 82599 uses a hardware assist.
3791 if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
3792 (ifp->if_capenable & IFCAP_RXCSUM) &&
3793 (ifp->if_capenable & IFCAP_LRO))
3794 ixgbe_setup_hw_rsc(rxr);
3795 else if (ifp->if_capenable & IFCAP_LRO) {
3796 int err = tcp_lro_init(lro);
3798 device_printf(dev, "LRO Initialization failed!\n");
3801 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
3802 rxr->lro_enabled = TRUE;
3803 lro->ifp = adapter->ifp;
3806 IXGBE_RX_UNLOCK(rxr);
3810 ixgbe_free_receive_ring(rxr);
3811 IXGBE_RX_UNLOCK(rxr);
3815 /*********************************************************************
3817 * Initialize all receive rings.
3819 **********************************************************************/
3821 ixgbe_setup_receive_structures(struct adapter *adapter)
3823 struct rx_ring *rxr = adapter->rx_rings;
3826 for (j = 0; j < adapter->num_queues; j++, rxr++)
3827 if (ixgbe_setup_receive_ring(rxr))
3833 * Free RX buffers allocated so far, we will only handle
3834 * the rings that completed, the failing case will have
3835 * cleaned up for itself. 'j' failed, so its the terminus.
3837 for (int i = 0; i < j; ++i) {
3838 rxr = &adapter->rx_rings[i];
3839 ixgbe_free_receive_ring(rxr);
3845 /*********************************************************************
3847 * Setup receive registers and features.
3849 **********************************************************************/
3850 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3852 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
3855 ixgbe_initialize_receive_units(struct adapter *adapter)
3857 struct rx_ring *rxr = adapter->rx_rings;
3858 struct ixgbe_hw *hw = &adapter->hw;
3859 struct ifnet *ifp = adapter->ifp;
3860 u32 bufsz, rxctrl, fctrl, srrctl, rxcsum;
3861 u32 reta, mrqc = 0, hlreg, random[10];
3865 * Make sure receives are disabled while
3866 * setting up the descriptor ring
3868 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3869 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
3870 rxctrl & ~IXGBE_RXCTRL_RXEN);
3872 /* Enable broadcasts */
3873 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3874 fctrl |= IXGBE_FCTRL_BAM;
3875 fctrl |= IXGBE_FCTRL_DPF;
3876 fctrl |= IXGBE_FCTRL_PMCF;
3877 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3879 /* Set for Jumbo Frames? */
3880 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3881 if (ifp->if_mtu > ETHERMTU)
3882 hlreg |= IXGBE_HLREG0_JUMBOEN;
3884 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3885 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3887 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3889 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3890 u64 rdba = rxr->rxdma.dma_paddr;
3892 /* Setup the Base and Length of the Rx Descriptor Ring */
3893 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
3894 (rdba & 0x00000000ffffffffULL));
3895 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
3896 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
3897 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3899 /* Set up the SRRCTL register */
3900 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
3901 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3902 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3904 if (rxr->hdr_split) {
3905 /* Use a standard mbuf for the header */
3906 srrctl |= ((IXGBE_RX_HDR <<
3907 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3908 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3909 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3911 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3912 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
3914 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3915 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
3916 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
3919 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3920 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3921 IXGBE_PSRTYPE_UDPHDR |
3922 IXGBE_PSRTYPE_IPV4HDR |
3923 IXGBE_PSRTYPE_IPV6HDR;
3924 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3927 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3930 if (adapter->num_queues > 1) {
3934 /* set up random bits */
3935 arc4rand(&random, sizeof(random), 0);
3937 /* Set up the redirection table */
3938 for (i = 0, j = 0; i < 128; i++, j++) {
3939 if (j == adapter->num_queues) j = 0;
3940 reta = (reta << 8) | (j * 0x11);
3942 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3945 /* Now fill our hash function seeds */
3946 for (int i = 0; i < 10; i++)
3947 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
3949 /* Perform hash on these packet types */
3950 mrqc = IXGBE_MRQC_RSSEN
3951 | IXGBE_MRQC_RSS_FIELD_IPV4
3952 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3953 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
3954 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3955 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3956 | IXGBE_MRQC_RSS_FIELD_IPV6
3957 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3958 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
3959 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3960 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3962 /* RSS and RX IPP Checksum are mutually exclusive */
3963 rxcsum |= IXGBE_RXCSUM_PCSD;
3966 if (ifp->if_capenable & IFCAP_RXCSUM)
3967 rxcsum |= IXGBE_RXCSUM_PCSD;
3969 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3970 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3972 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3977 /*********************************************************************
3979 * Free all receive rings.
3981 **********************************************************************/
3983 ixgbe_free_receive_structures(struct adapter *adapter)
3985 struct rx_ring *rxr = adapter->rx_rings;
3987 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3988 struct lro_ctrl *lro = &rxr->lro;
3989 ixgbe_free_receive_buffers(rxr);
3990 /* Free LRO memory */
3992 /* Free the ring memory as well */
3993 ixgbe_dma_free(adapter, &rxr->rxdma);
3996 free(adapter->rx_rings, M_DEVBUF);
4000 /*********************************************************************
4002 * Free receive ring data structures
4004 **********************************************************************/
4006 ixgbe_free_receive_buffers(struct rx_ring *rxr)
4008 struct adapter *adapter = rxr->adapter;
4009 struct ixgbe_rx_buf *rxbuf;
4011 INIT_DEBUGOUT("free_receive_structures: begin");
4013 /* Cleanup any existing buffers */
4014 if (rxr->rx_buffers != NULL) {
4015 for (int i = 0; i < adapter->num_rx_desc; i++) {
4016 rxbuf = &rxr->rx_buffers[i];
4017 if (rxbuf->m_head != NULL) {
4018 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
4019 BUS_DMASYNC_POSTREAD);
4020 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
4021 rxbuf->m_head->m_flags |= M_PKTHDR;
4022 m_freem(rxbuf->m_head);
4024 if (rxbuf->m_pack != NULL) {
4025 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
4026 BUS_DMASYNC_POSTREAD);
4027 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
4028 rxbuf->m_pack->m_flags |= M_PKTHDR;
4029 m_freem(rxbuf->m_pack);
4031 rxbuf->m_head = NULL;
4032 rxbuf->m_pack = NULL;
4033 if (rxbuf->hmap != NULL) {
4034 bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
4037 if (rxbuf->pmap != NULL) {
4038 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
4042 if (rxr->rx_buffers != NULL) {
4043 free(rxr->rx_buffers, M_DEVBUF);
4044 rxr->rx_buffers = NULL;
4048 if (rxr->htag != NULL) {
4049 bus_dma_tag_destroy(rxr->htag);
4052 if (rxr->ptag != NULL) {
4053 bus_dma_tag_destroy(rxr->ptag);
4060 static __inline void
4061 ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
4065 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
4066 * should be computed by hardware. Also it should not have VLAN tag in
4069 if (rxr->lro_enabled &&
4070 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
4071 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
4072 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
4073 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
4074 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
4075 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
4077 * Send to the stack if:
4078 ** - LRO not enabled, or
4079 ** - no LRO resources, or
4080 ** - lro enqueue fails
4082 if (rxr->lro.lro_cnt != 0)
4083 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
4086 IXGBE_RX_UNLOCK(rxr);
4087 (*ifp->if_input)(ifp, m);
4091 static __inline void
4092 ixgbe_rx_discard(struct rx_ring *rxr, int i)
4094 struct ixgbe_rx_buf *rbuf;
4096 rbuf = &rxr->rx_buffers[i];
4098 if (rbuf->fmp != NULL) {/* Partial chain ? */
4099 rbuf->fmp->m_flags |= M_PKTHDR;
4105 ** With advanced descriptors the writeback
4106 ** clobbers the buffer addrs, so its easier
4107 ** to just free the existing mbufs and take
4108 ** the normal refresh path to get new buffers
4112 m_free(rbuf->m_head);
4113 rbuf->m_head = NULL;
4117 m_free(rbuf->m_pack);
4118 rbuf->m_pack = NULL;
4125 /*********************************************************************
4127 * This routine executes in interrupt context. It replenishes
4128 * the mbufs in the descriptor and sends data which has been
4129 * dma'ed into host memory to upper layer.
4131 * We loop at most count times if count is > 0, or until done if
4134 * Return TRUE for more work, FALSE for all clean.
4135 *********************************************************************/
4137 ixgbe_rxeof(struct ix_queue *que, int count)
4139 struct adapter *adapter = que->adapter;
4140 struct rx_ring *rxr = que->rxr;
4141 struct ifnet *ifp = adapter->ifp;
4142 struct lro_ctrl *lro = &rxr->lro;
4143 struct lro_entry *queued;
4144 int i, nextp, processed = 0;
4146 union ixgbe_adv_rx_desc *cur;
4147 struct ixgbe_rx_buf *rbuf, *nbuf;
4151 for (i = rxr->next_to_check; count != 0;) {
4152 struct mbuf *sendmp, *mh, *mp;
4154 u16 hlen, plen, hdr, vtag;
4157 /* Sync the ring. */
4158 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4159 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4161 cur = &rxr->rx_base[i];
4162 staterr = le32toh(cur->wb.upper.status_error);
4164 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
4166 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
4173 cur->wb.upper.status_error = 0;
4174 rbuf = &rxr->rx_buffers[i];
4178 plen = le16toh(cur->wb.upper.length);
4179 ptype = le32toh(cur->wb.lower.lo_dword.data) &
4180 IXGBE_RXDADV_PKTTYPE_MASK;
4181 hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
4182 vtag = le16toh(cur->wb.upper.vlan);
4183 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
4185 /* Make sure bad packets are discarded */
4186 if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
4189 rxr->rx_discarded++;
4191 rxr->discard = FALSE;
4193 rxr->discard = TRUE;
4194 ixgbe_rx_discard(rxr, i);
4199 ** On 82599 which supports a hardware
4200 ** LRO (called HW RSC), packets need
4201 ** not be fragmented across sequential
4202 ** descriptors, rather the next descriptor
4203 ** is indicated in bits of the descriptor.
4204 ** This also means that we might proceses
4205 ** more than one packet at a time, something
4206 ** that has never been true before, it
4207 ** required eliminating global chain pointers
4208 ** in favor of what we are doing here. -jfv
4212 ** Figure out the next descriptor
4215 if (rxr->hw_rsc == TRUE) {
4216 rsc = ixgbe_rsc_count(cur);
4217 rxr->rsc_num += (rsc - 1);
4219 if (rsc) { /* Get hardware index */
4221 IXGBE_RXDADV_NEXTP_MASK) >>
4222 IXGBE_RXDADV_NEXTP_SHIFT);
4223 } else { /* Just sequential */
4225 if (nextp == adapter->num_rx_desc)
4228 nbuf = &rxr->rx_buffers[nextp];
4232 ** The header mbuf is ONLY used when header
4233 ** split is enabled, otherwise we get normal
4234 ** behavior, ie, both header and payload
4235 ** are DMA'd into the payload buffer.
4237 ** Rather than using the fmp/lmp global pointers
4238 ** we now keep the head of a packet chain in the
4239 ** buffer struct and pass this along from one
4240 ** descriptor to the next, until we get EOP.
4242 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
4243 /* This must be an initial descriptor */
4244 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
4245 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
4246 if (hlen > IXGBE_RX_HDR)
4247 hlen = IXGBE_RX_HDR;
4249 mh->m_flags |= M_PKTHDR;
4251 mh->m_pkthdr.len = mh->m_len;
4252 /* Null buf pointer so it is refreshed */
4253 rbuf->m_head = NULL;
4255 ** Check the payload length, this
4256 ** could be zero if its a small
4262 mp->m_flags &= ~M_PKTHDR;
4264 mh->m_pkthdr.len += mp->m_len;
4265 /* Null buf pointer so it is refreshed */
4266 rbuf->m_pack = NULL;
4267 rxr->rx_split_packets++;
4270 ** Now create the forward
4271 ** chain so when complete
4275 /* stash the chain head */
4277 /* Make forward chain */
4279 mp->m_next = nbuf->m_pack;
4281 mh->m_next = nbuf->m_pack;
4283 /* Singlet, prepare to send */
4285 if ((adapter->num_vlans) &&
4286 (staterr & IXGBE_RXD_STAT_VP)) {
4287 sendmp->m_pkthdr.ether_vtag = vtag;
4288 sendmp->m_flags |= M_VLANTAG;
4293 ** Either no header split, or a
4294 ** secondary piece of a fragmented
4299 ** See if there is a stored head
4300 ** that determines what we are
4303 rbuf->m_pack = rbuf->fmp = NULL;
4305 if (sendmp != NULL) { /* secondary frag */
4306 mp->m_flags &= ~M_PKTHDR;
4307 sendmp->m_pkthdr.len += mp->m_len;
4309 /* first desc of a non-ps chain */
4311 sendmp->m_flags |= M_PKTHDR;
4312 sendmp->m_pkthdr.len = mp->m_len;
4313 if (staterr & IXGBE_RXD_STAT_VP) {
4314 sendmp->m_pkthdr.ether_vtag = vtag;
4315 sendmp->m_flags |= M_VLANTAG;
4318 /* Pass the head pointer on */
4322 mp->m_next = nbuf->m_pack;
4326 /* Sending this frame? */
4328 sendmp->m_pkthdr.rcvif = ifp;
4331 /* capture data for AIM */
4332 rxr->bytes += sendmp->m_pkthdr.len;
4333 rxr->rx_bytes += sendmp->m_pkthdr.len;
4334 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
4335 ixgbe_rx_checksum(staterr, sendmp, ptype);
4336 #if __FreeBSD_version >= 800000
4337 sendmp->m_pkthdr.flowid = que->msix;
4338 sendmp->m_flags |= M_FLOWID;
4342 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4343 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4345 /* Advance our pointers to the next descriptor. */
4346 if (++i == adapter->num_rx_desc)
4349 /* Now send to the stack or do LRO */
4350 if (sendmp != NULL) {
4351 rxr->next_to_check = i;
4352 ixgbe_rx_input(rxr, ifp, sendmp, ptype);
4353 i = rxr->next_to_check;
4356 /* Every 8 descriptors we go to refresh mbufs */
4357 if (processed == 8) {
4358 ixgbe_refresh_mbufs(rxr, i);
4363 /* Refresh any remaining buf structs */
4364 if (ixgbe_rx_unrefreshed(rxr))
4365 ixgbe_refresh_mbufs(rxr, i);
4367 rxr->next_to_check = i;
4370 * Flush any outstanding LRO work
4372 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
4373 SLIST_REMOVE_HEAD(&lro->lro_active, next);
4374 tcp_lro_flush(lro, queued);
4377 IXGBE_RX_UNLOCK(rxr);
4380 ** We still have cleaning to do?
4381 ** Schedule another interrupt if so.
4383 if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
4384 ixgbe_rearm_queues(adapter, (u64)(1 << que->msix));
4392 /*********************************************************************
4394 * Verify that the hardware indicated that the checksum is valid.
4395 * Inform the stack about the status of checksum so that stack
4396 * doesn't spend time verifying the checksum.
4398 *********************************************************************/
4400 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
4402 u16 status = (u16) staterr;
4403 u8 errors = (u8) (staterr >> 24);
4406 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
4407 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
4410 if (status & IXGBE_RXD_STAT_IPCS) {
4411 if (!(errors & IXGBE_RXD_ERR_IPE)) {
4412 /* IP Checksum Good */
4413 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4414 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4417 mp->m_pkthdr.csum_flags = 0;
4419 if (status & IXGBE_RXD_STAT_L4CS) {
4420 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4421 #if __FreeBSD_version >= 800000
4423 type = CSUM_SCTP_VALID;
4425 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
4426 mp->m_pkthdr.csum_flags |= type;
4428 mp->m_pkthdr.csum_data = htons(0xffff);
4436 ** This routine is run via an vlan config EVENT,
4437 ** it enables us to use the HW Filter table since
4438 ** we can get the vlan id. This just creates the
4439 ** entry in the soft version of the VFTA, init will
4440 ** repopulate the real table.
4443 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4445 struct adapter *adapter = ifp->if_softc;
4448 if (ifp->if_softc != arg) /* Not our event */
4451 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4454 IXGBE_CORE_LOCK(adapter);
4455 index = (vtag >> 5) & 0x7F;
4457 adapter->shadow_vfta[index] |= (1 << bit);
4458 ++adapter->num_vlans;
4459 ixgbe_init_locked(adapter);
4460 IXGBE_CORE_UNLOCK(adapter);
4464 ** This routine is run via an vlan
4465 ** unconfig EVENT, remove our entry
4466 ** in the soft vfta.
4469 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4471 struct adapter *adapter = ifp->if_softc;
4474 if (ifp->if_softc != arg)
4477 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4480 IXGBE_CORE_LOCK(adapter);
4481 index = (vtag >> 5) & 0x7F;
4483 adapter->shadow_vfta[index] &= ~(1 << bit);
4484 --adapter->num_vlans;
4485 /* Re-init to load the changes */
4486 ixgbe_init_locked(adapter);
4487 IXGBE_CORE_UNLOCK(adapter);
4491 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
4493 struct ifnet *ifp = adapter->ifp;
4494 struct ixgbe_hw *hw = &adapter->hw;
4499 ** We get here thru init_locked, meaning
4500 ** a soft reset, this has already cleared
4501 ** the VFTA and other state, so if there
4502 ** have been no vlan's registered do nothing.
4504 if (adapter->num_vlans == 0)
4508 ** A soft reset zero's out the VFTA, so
4509 ** we need to repopulate it now.
4511 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
4512 if (adapter->shadow_vfta[i] != 0)
4513 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
4514 adapter->shadow_vfta[i]);
4516 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4517 /* Enable the Filter Table if enabled */
4518 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
4519 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
4520 ctrl |= IXGBE_VLNCTRL_VFE;
4522 if (hw->mac.type == ixgbe_mac_82598EB)
4523 ctrl |= IXGBE_VLNCTRL_VME;
4524 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
4526 /* On 82599 the VLAN enable is per/queue in RXDCTL */
4527 if (hw->mac.type != ixgbe_mac_82598EB)
4528 for (int i = 0; i < adapter->num_queues; i++) {
4529 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
4530 ctrl |= IXGBE_RXDCTL_VME;
4531 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
4536 ixgbe_enable_intr(struct adapter *adapter)
4538 struct ixgbe_hw *hw = &adapter->hw;
4539 struct ix_queue *que = adapter->queues;
4540 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
4543 /* Enable Fan Failure detection */
4544 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4545 mask |= IXGBE_EIMS_GPI_SDP1;
4547 mask |= IXGBE_EIMS_ECC;
4548 mask |= IXGBE_EIMS_GPI_SDP1;
4549 mask |= IXGBE_EIMS_GPI_SDP2;
4551 mask |= IXGBE_EIMS_FLOW_DIR;
4555 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4557 /* With RSS we use auto clear */
4558 if (adapter->msix_mem) {
4559 mask = IXGBE_EIMS_ENABLE_MASK;
4560 /* Don't autoclear Link */
4561 mask &= ~IXGBE_EIMS_OTHER;
4562 mask &= ~IXGBE_EIMS_LSC;
4563 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4567 ** Now enable all queues, this is done separately to
4568 ** allow for handling the extended (beyond 32) MSIX
4569 ** vectors that can be used by 82599
4571 for (int i = 0; i < adapter->num_queues; i++, que++)
4572 ixgbe_enable_queue(adapter, que->msix);
4574 IXGBE_WRITE_FLUSH(hw);
4580 ixgbe_disable_intr(struct adapter *adapter)
4582 if (adapter->msix_mem)
4583 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4584 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
4585 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
4587 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
4588 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
4589 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
4591 IXGBE_WRITE_FLUSH(&adapter->hw);
4596 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
4600 value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
4607 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
4609 pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
4616 ** Setup the correct IVAR register for a particular MSIX interrupt
4617 ** (yes this is all very magic and confusing :)
4618 ** - entry is the register array entry
4619 ** - vector is the MSIX vector for this queue
4620 ** - type is RX/TX/MISC
4623 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4625 struct ixgbe_hw *hw = &adapter->hw;
4628 vector |= IXGBE_IVAR_ALLOC_VAL;
4630 switch (hw->mac.type) {
4632 case ixgbe_mac_82598EB:
4634 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4636 entry += (type * 64);
4637 index = (entry >> 2) & 0x1F;
4638 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4639 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4640 ivar |= (vector << (8 * (entry & 0x3)));
4641 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4644 case ixgbe_mac_82599EB:
4645 if (type == -1) { /* MISC IVAR */
4646 index = (entry & 1) * 8;
4647 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4648 ivar &= ~(0xFF << index);
4649 ivar |= (vector << index);
4650 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4651 } else { /* RX/TX IVARS */
4652 index = (16 * (entry & 1)) + (8 * type);
4653 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4654 ivar &= ~(0xFF << index);
4655 ivar |= (vector << index);
4656 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4665 ixgbe_configure_ivars(struct adapter *adapter)
4667 struct ix_queue *que = adapter->queues;
4670 if (ixgbe_max_interrupt_rate > 0)
4671 newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4675 for (int i = 0; i < adapter->num_queues; i++, que++) {
4676 /* First the RX queue entry */
4677 ixgbe_set_ivar(adapter, i, que->msix, 0);
4678 /* ... and the TX */
4679 ixgbe_set_ivar(adapter, i, que->msix, 1);
4680 /* Set an Initial EITR value */
4681 IXGBE_WRITE_REG(&adapter->hw,
4682 IXGBE_EITR(que->msix), newitr);
4685 /* For the Link interrupt */
4686 ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
4690 ** ixgbe_sfp_probe - called in the local timer to
4691 ** determine if a port had optics inserted.
4693 static bool ixgbe_sfp_probe(struct adapter *adapter)
4695 struct ixgbe_hw *hw = &adapter->hw;
4696 device_t dev = adapter->dev;
4697 bool result = FALSE;
4699 if ((hw->phy.type == ixgbe_phy_nl) &&
4700 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4701 s32 ret = hw->phy.ops.identify_sfp(hw);
4704 ret = hw->phy.ops.reset(hw);
4705 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4706 device_printf(dev,"Unsupported SFP+ module detected!");
4707 printf(" Reload driver with supported module.\n");
4708 adapter->sfp_probe = FALSE;
4711 device_printf(dev,"SFP+ module detected!\n");
4712 /* We now have supported optics */
4713 adapter->sfp_probe = FALSE;
4714 /* Set the optics type so system reports correctly */
4715 ixgbe_setup_optics(adapter);
4723 ** Tasklet handler for MSIX Link interrupts
4724 ** - do outside interrupt since it might sleep
4727 ixgbe_handle_link(void *context, int pending)
4729 struct adapter *adapter = context;
4731 ixgbe_check_link(&adapter->hw,
4732 &adapter->link_speed, &adapter->link_up, 0);
4733 ixgbe_update_link_status(adapter);
4737 ** Tasklet for handling SFP module interrupts
4740 ixgbe_handle_mod(void *context, int pending)
4742 struct adapter *adapter = context;
4743 struct ixgbe_hw *hw = &adapter->hw;
4744 device_t dev = adapter->dev;
4747 err = hw->phy.ops.identify_sfp(hw);
4748 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4750 "Unsupported SFP+ module type was detected.\n");
4753 err = hw->mac.ops.setup_sfp(hw);
4754 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4756 "Setup failure - unsupported SFP+ module type.\n");
4759 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
4765 ** Tasklet for handling MSF (multispeed fiber) interrupts
4768 ixgbe_handle_msf(void *context, int pending)
4770 struct adapter *adapter = context;
4771 struct ixgbe_hw *hw = &adapter->hw;
4775 autoneg = hw->phy.autoneg_advertised;
4776 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4777 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4778 if (hw->mac.ops.setup_link)
4779 hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
4785 ** Tasklet for reinitializing the Flow Director filter table
4788 ixgbe_reinit_fdir(void *context, int pending)
4790 struct adapter *adapter = context;
4791 struct ifnet *ifp = adapter->ifp;
4793 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
4795 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
4796 adapter->fdir_reinit = 0;
4797 /* Restart the interface */
4798 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4803 /**********************************************************************
4805 * Update the board statistics counters.
4807 **********************************************************************/
4809 ixgbe_update_stats_counters(struct adapter *adapter)
4811 struct ifnet *ifp = adapter->ifp;
4812 struct ixgbe_hw *hw = &adapter->hw;
4813 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4814 u64 total_missed_rx = 0;
4816 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4817 adapter->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4818 adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4819 adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
4821 for (int i = 0; i < 8; i++) {
4823 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
4824 /* missed_rx tallies misses for the gprc workaround */
4826 /* global total per queue */
4827 adapter->stats.mpc[i] += mp;
4828 /* Running comprehensive total for stats display */
4829 total_missed_rx += adapter->stats.mpc[i];
4830 if (hw->mac.type == ixgbe_mac_82598EB)
4831 adapter->stats.rnbc[i] +=
4832 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
4833 adapter->stats.pxontxc[i] +=
4834 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
4835 adapter->stats.pxonrxc[i] +=
4836 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
4837 adapter->stats.pxofftxc[i] +=
4838 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
4839 adapter->stats.pxoffrxc[i] +=
4840 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
4841 adapter->stats.pxon2offc[i] +=
4842 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
4844 for (int i = 0; i < 16; i++) {
4845 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4846 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4847 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
4848 adapter->stats.qbrc[i] +=
4849 ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
4850 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
4851 adapter->stats.qbtc[i] +=
4852 ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
4853 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4855 adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
4856 adapter->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
4857 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4859 /* Hardware workaround, gprc counts missed packets */
4860 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4861 adapter->stats.gprc -= missed_rx;
4863 if (hw->mac.type != ixgbe_mac_82598EB) {
4864 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4865 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4866 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4867 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
4868 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
4869 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4870 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4871 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4873 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4874 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4875 /* 82598 only has a counter in the high register */
4876 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4877 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4878 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4882 * Workaround: mprc hardware is incorrectly counting
4883 * broadcasts, so for now we subtract those.
4885 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4886 adapter->stats.bprc += bprc;
4887 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4888 if (hw->mac.type == ixgbe_mac_82598EB)
4889 adapter->stats.mprc -= bprc;
4891 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4892 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4893 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4894 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4895 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4896 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4898 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4899 adapter->stats.lxontxc += lxon;
4900 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4901 adapter->stats.lxofftxc += lxoff;
4902 total = lxon + lxoff;
4904 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4905 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4906 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4907 adapter->stats.gptc -= total;
4908 adapter->stats.mptc -= total;
4909 adapter->stats.ptc64 -= total;
4910 adapter->stats.gotc -= total * ETHER_MIN_LEN;
4912 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4913 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4914 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4915 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4916 adapter->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4917 adapter->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4918 adapter->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4919 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4920 adapter->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4921 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4922 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4923 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4924 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4925 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4926 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4927 adapter->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4928 adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4929 adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4930 /* Only read FCOE on 82599 */
4931 if (hw->mac.type != ixgbe_mac_82598EB) {
4932 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4933 adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4934 adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4935 adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4936 adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4939 /* Fill out the OS statistics structure */
4940 ifp->if_ipackets = adapter->stats.gprc;
4941 ifp->if_opackets = adapter->stats.gptc;
4942 ifp->if_ibytes = adapter->stats.gorc;
4943 ifp->if_obytes = adapter->stats.gotc;
4944 ifp->if_imcasts = adapter->stats.mprc;
4945 ifp->if_collisions = 0;
4948 ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
4949 adapter->stats.rlec;
4952 /** ixgbe_sysctl_tdh_handler - Handler function
4953 * Retrieves the TDH value from the hardware
4956 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4960 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4963 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4964 error = sysctl_handle_int(oidp, &val, 0, req);
4965 if (error || !req->newptr)
4970 /** ixgbe_sysctl_tdt_handler - Handler function
4971 * Retrieves the TDT value from the hardware
4974 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4978 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4981 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4982 error = sysctl_handle_int(oidp, &val, 0, req);
4983 if (error || !req->newptr)
4988 /** ixgbe_sysctl_rdh_handler - Handler function
4989 * Retrieves the RDH value from the hardware
4992 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4996 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4999 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
5000 error = sysctl_handle_int(oidp, &val, 0, req);
5001 if (error || !req->newptr)
5006 /** ixgbe_sysctl_rdt_handler - Handler function
5007 * Retrieves the RDT value from the hardware
5010 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
5014 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
5017 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
5018 error = sysctl_handle_int(oidp, &val, 0, req);
5019 if (error || !req->newptr)
5025 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
5028 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
5029 unsigned int reg, usec, rate;
5031 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
5032 usec = ((reg & 0x0FF8) >> 3);
5034 rate = 1000000 / usec;
5037 error = sysctl_handle_int(oidp, &rate, 0, req);
5038 if (error || !req->newptr)
5044 * Add sysctl variables, one per statistic, to the system.
5047 ixgbe_add_hw_stats(struct adapter *adapter)
5050 device_t dev = adapter->dev;
5052 struct tx_ring *txr = adapter->tx_rings;
5053 struct rx_ring *rxr = adapter->rx_rings;
5055 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
5056 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
5057 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
5058 struct ixgbe_hw_stats *stats = &adapter->stats;
5060 struct sysctl_oid *stat_node, *queue_node;
5061 struct sysctl_oid_list *stat_list, *queue_list;
5063 #define QUEUE_NAME_LEN 32
5064 char namebuf[QUEUE_NAME_LEN];
5066 /* Driver Statistics */
5067 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
5068 CTLFLAG_RD, &adapter->dropped_pkts,
5069 "Driver dropped packets");
5070 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
5071 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
5072 "m_defrag() failed");
5073 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_dma_setup",
5074 CTLFLAG_RD, &adapter->no_tx_dma_setup,
5075 "Driver tx dma failure in xmit");
5076 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
5077 CTLFLAG_RD, &adapter->watchdog_events,
5078 "Watchdog timeouts");
5079 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tso_tx",
5080 CTLFLAG_RD, &adapter->tso_tx,
5082 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
5083 CTLFLAG_RD, &adapter->link_irq,
5084 "Link MSIX IRQ Handled");
5086 for (int i = 0; i < adapter->num_queues; i++, txr++) {
5087 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
5088 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
5089 CTLFLAG_RD, NULL, "Queue Name");
5090 queue_list = SYSCTL_CHILDREN(queue_node);
5092 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
5093 CTLTYPE_UINT | CTLFLAG_RD, &adapter->queues[i],
5094 sizeof(&adapter->queues[i]),
5095 ixgbe_sysctl_interrupt_rate_handler, "IU",
5097 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
5098 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
5099 ixgbe_sysctl_tdh_handler, "IU",
5100 "Transmit Descriptor Head");
5101 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
5102 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
5103 ixgbe_sysctl_tdt_handler, "IU",
5104 "Transmit Descriptor Tail");
5105 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
5106 CTLFLAG_RD, &txr->no_desc_avail,
5107 "Queue No Descriptor Available");
5108 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
5109 CTLFLAG_RD, &txr->total_packets,
5110 "Queue Packets Transmitted");
5113 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
5114 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
5115 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
5116 CTLFLAG_RD, NULL, "Queue Name");
5117 queue_list = SYSCTL_CHILDREN(queue_node);
5119 struct lro_ctrl *lro = &rxr->lro;
5121 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
5122 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
5123 CTLFLAG_RD, NULL, "Queue Name");
5124 queue_list = SYSCTL_CHILDREN(queue_node);
5126 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
5127 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
5128 ixgbe_sysctl_rdh_handler, "IU",
5129 "Receive Descriptor Head");
5130 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
5131 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
5132 ixgbe_sysctl_rdt_handler, "IU",
5133 "Receive Descriptor Tail");
5134 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
5135 CTLFLAG_RD, &rxr->rx_packets,
5136 "Queue Packets Received");
5137 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
5138 CTLFLAG_RD, &rxr->rx_bytes,
5139 "Queue Bytes Received");
5140 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
5141 CTLFLAG_RD, &lro->lro_queued, 0,
5143 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
5144 CTLFLAG_RD, &lro->lro_flushed, 0,
5148 /* MAC stats get the own sub node */
5150 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
5151 CTLFLAG_RD, NULL, "MAC Statistics");
5152 stat_list = SYSCTL_CHILDREN(stat_node);
5154 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
5155 CTLFLAG_RD, &stats->crcerrs,
5157 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
5158 CTLFLAG_RD, &stats->illerrc,
5159 "Illegal Byte Errors");
5160 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
5161 CTLFLAG_RD, &stats->errbc,
5163 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
5164 CTLFLAG_RD, &stats->mspdc,
5165 "MAC Short Packets Discarded");
5166 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
5167 CTLFLAG_RD, &stats->mlfc,
5168 "MAC Local Faults");
5169 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
5170 CTLFLAG_RD, &stats->mrfc,
5171 "MAC Remote Faults");
5172 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
5173 CTLFLAG_RD, &stats->rlec,
5174 "Receive Length Errors");
5175 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_txd",
5176 CTLFLAG_RD, &stats->lxontxc,
5177 "Link XON Transmitted");
5178 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_rcvd",
5179 CTLFLAG_RD, &stats->lxonrxc,
5180 "Link XON Received");
5181 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_txd",
5182 CTLFLAG_RD, &stats->lxofftxc,
5183 "Link XOFF Transmitted");
5184 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_rcvd",
5185 CTLFLAG_RD, &stats->lxoffrxc,
5186 "Link XOFF Received");
5188 /* Packet Reception Stats */
5189 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
5190 CTLFLAG_RD, &stats->tor,
5191 "Total Octets Received");
5192 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
5193 CTLFLAG_RD, &stats->gorc,
5194 "Good Octets Received");
5195 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
5196 CTLFLAG_RD, &stats->tpr,
5197 "Total Packets Received");
5198 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
5199 CTLFLAG_RD, &stats->gprc,
5200 "Good Packets Received");
5201 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
5202 CTLFLAG_RD, &stats->mprc,
5203 "Multicast Packets Received");
5204 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
5205 CTLFLAG_RD, &stats->bprc,
5206 "Broadcast Packets Received");
5207 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
5208 CTLFLAG_RD, &stats->prc64,
5209 "64 byte frames received ");
5210 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
5211 CTLFLAG_RD, &stats->prc127,
5212 "65-127 byte frames received");
5213 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
5214 CTLFLAG_RD, &stats->prc255,
5215 "128-255 byte frames received");
5216 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
5217 CTLFLAG_RD, &stats->prc511,
5218 "256-511 byte frames received");
5219 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
5220 CTLFLAG_RD, &stats->prc1023,
5221 "512-1023 byte frames received");
5222 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
5223 CTLFLAG_RD, &stats->prc1522,
5224 "1023-1522 byte frames received");
5225 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
5226 CTLFLAG_RD, &stats->ruc,
5227 "Receive Undersized");
5228 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
5229 CTLFLAG_RD, &stats->rfc,
5230 "Fragmented Packets Received ");
5231 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
5232 CTLFLAG_RD, &stats->roc,
5233 "Oversized Packets Received");
5234 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
5235 CTLFLAG_RD, &stats->rjc,
5237 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
5238 CTLFLAG_RD, &stats->mngprc,
5239 "Management Packets Received");
5240 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
5241 CTLFLAG_RD, &stats->mngptc,
5242 "Management Packets Dropped");
5243 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
5244 CTLFLAG_RD, &stats->xec,
5247 /* Packet Transmission Stats */
5248 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
5249 CTLFLAG_RD, &stats->gotc,
5250 "Good Octets Transmitted");
5251 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
5252 CTLFLAG_RD, &stats->tpt,
5253 "Total Packets Transmitted");
5254 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
5255 CTLFLAG_RD, &stats->gptc,
5256 "Good Packets Transmitted");
5257 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
5258 CTLFLAG_RD, &stats->bptc,
5259 "Broadcast Packets Transmitted");
5260 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
5261 CTLFLAG_RD, &stats->mptc,
5262 "Multicast Packets Transmitted");
5263 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
5264 CTLFLAG_RD, &stats->mngptc,
5265 "Management Packets Transmitted");
5266 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
5267 CTLFLAG_RD, &stats->ptc64,
5268 "64 byte frames transmitted ");
5269 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
5270 CTLFLAG_RD, &stats->ptc127,
5271 "65-127 byte frames transmitted");
5272 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
5273 CTLFLAG_RD, &stats->ptc255,
5274 "128-255 byte frames transmitted");
5275 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
5276 CTLFLAG_RD, &stats->ptc511,
5277 "256-511 byte frames transmitted");
5278 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
5279 CTLFLAG_RD, &stats->ptc1023,
5280 "512-1023 byte frames transmitted");
5281 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
5282 CTLFLAG_RD, &stats->ptc1522,
5283 "1024-1522 byte frames transmitted");
5286 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_crc",
5287 CTLFLAG_RD, &stats->fccrc,
5289 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_last",
5290 CTLFLAG_RD, &stats->fclast,
5292 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_drpd",
5293 CTLFLAG_RD, &stats->fcoerpdc,
5294 "FCoE Packets Dropped");
5295 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_rcvd",
5296 CTLFLAG_RD, &stats->fcoeprc,
5297 "FCoE Packets Received");
5298 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_txd",
5299 CTLFLAG_RD, &stats->fcoeptc,
5300 "FCoE Packets Transmitted");
5301 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_rcvd",
5302 CTLFLAG_RD, &stats->fcoedwrc,
5303 "FCoE DWords Received");
5304 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_txd",
5305 CTLFLAG_RD, &stats->fcoedwtc,
5306 "FCoE DWords Transmitted");
5310 ** Set flow control using sysctl:
5311 ** Flow control values:
5318 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
5321 int last = ixgbe_flow_control;
5322 struct adapter *adapter;
5324 error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);
5328 /* Don't bother if it's not changed */
5329 if (ixgbe_flow_control == last)
5332 adapter = (struct adapter *) arg1;
5333 switch (ixgbe_flow_control) {
5334 case ixgbe_fc_rx_pause:
5335 case ixgbe_fc_tx_pause:
5337 adapter->hw.fc.requested_mode = ixgbe_flow_control;
5341 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5344 ixgbe_fc_enable(&adapter->hw, 0);
5349 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
5350 const char *description, int *limit, int value)
5353 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5354 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5355 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
5359 ** Control link advertise speed:
5361 ** 1 - advertise only 1G
5364 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
5367 struct adapter *adapter;
5368 struct ixgbe_hw *hw;
5369 ixgbe_link_speed speed, last;
5371 adapter = (struct adapter *) arg1;
5373 last = hw->phy.autoneg_advertised;
5375 error = sysctl_handle_int(oidp, &adapter->advertise, 0, req);
5377 if ((error) || (adapter->advertise == -1))
5380 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5381 (hw->phy.multispeed_fiber)))
5384 if (adapter->advertise == 1)
5385 speed = IXGBE_LINK_SPEED_1GB_FULL;
5387 speed = IXGBE_LINK_SPEED_1GB_FULL |
5388 IXGBE_LINK_SPEED_10GB_FULL;
5390 if (speed == last) /* no change */
5393 hw->mac.autotry_restart = TRUE;
5394 hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);