1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
44 /************************************************************************
 * Driver version string, reported to userland (sysctl / dmesg).
46 ************************************************************************/
47 char ixgbe_driver_version[] = "3.2.12-k";
50 /************************************************************************
53 * Used by probe to select devices to load on
54 * Last field stores an index into ixgbe_strings
55 * Last entry must be all 0s
57 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58 ************************************************************************/
59 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
/* 82598 family (first-generation 10GbE MACs) */
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
/* 82599 family */
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
/* X540 */
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
/* X550 */
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
/* X550EM_x */
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
/* X550EM_a */
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
/* Bypass adapters */
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
105 /* required last entry */
109 /************************************************************************
110 * Table of branding strings
111 ************************************************************************/
112 static char *ixgbe_strings[] = {
/* Index 0: branding string referenced by the device table's String Index */
113 "Intel(R) PRO/10GbE PCI-Express Network Driver"
116 /************************************************************************
117 * Function prototypes
118 ************************************************************************/
/* newbus device interface entry points */
119 static int ixgbe_probe(device_t);
120 static int ixgbe_attach(device_t);
121 static int ixgbe_detach(device_t);
122 static int ixgbe_shutdown(device_t);
123 static int ixgbe_suspend(device_t);
124 static int ixgbe_resume(device_t);
/* ifnet entry points and init/stop */
125 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
126 static void ixgbe_init(void *);
127 static void ixgbe_init_locked(struct adapter *);
128 static void ixgbe_stop(void *);
129 #if __FreeBSD_version >= 1100036
130 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
/* Hardware/feature setup helpers */
132 static void ixgbe_init_device_features(struct adapter *);
133 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
134 static void ixgbe_add_media_types(struct adapter *);
135 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
136 static int ixgbe_media_change(struct ifnet *);
137 static int ixgbe_allocate_pci_resources(struct adapter *);
138 static void ixgbe_get_slot_info(struct adapter *);
139 static int ixgbe_allocate_msix(struct adapter *);
140 static int ixgbe_allocate_legacy(struct adapter *);
141 static int ixgbe_configure_interrupts(struct adapter *);
142 static void ixgbe_free_pci_resources(struct adapter *);
143 static void ixgbe_local_timer(void *);
144 static int ixgbe_setup_interface(device_t, struct adapter *);
145 static void ixgbe_config_gpie(struct adapter *);
146 static void ixgbe_config_dmac(struct adapter *);
147 static void ixgbe_config_delay_values(struct adapter *);
148 static void ixgbe_config_link(struct adapter *);
149 static void ixgbe_check_wol_support(struct adapter *);
150 static int ixgbe_setup_low_power_mode(struct adapter *);
151 static void ixgbe_rearm_queues(struct adapter *, u64);
/* TX/RX ring initialization */
153 static void ixgbe_initialize_transmit_units(struct adapter *);
154 static void ixgbe_initialize_receive_units(struct adapter *);
155 static void ixgbe_enable_rx_drop(struct adapter *);
156 static void ixgbe_disable_rx_drop(struct adapter *);
157 static void ixgbe_initialize_rss_mapping(struct adapter *);
/* Interrupt, statistics and link management */
159 static void ixgbe_enable_intr(struct adapter *);
160 static void ixgbe_disable_intr(struct adapter *);
161 static void ixgbe_update_stats_counters(struct adapter *);
162 static void ixgbe_set_promisc(struct adapter *);
163 static void ixgbe_set_multi(struct adapter *);
164 static void ixgbe_update_link_status(struct adapter *);
165 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
166 static void ixgbe_configure_ivars(struct adapter *);
167 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN hardware offload support */
169 static void ixgbe_setup_vlan_hw_support(struct adapter *);
170 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
171 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
/* sysctl tree construction and tunable plumbing */
173 static void ixgbe_add_device_sysctls(struct adapter *);
174 static void ixgbe_add_hw_stats(struct adapter *);
175 static int ixgbe_set_flowcntl(struct adapter *, int);
176 static int ixgbe_set_advertise(struct adapter *, int);
177 static int ixgbe_get_advertise(struct adapter *);
179 /* Sysctl handlers */
180 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
181 const char *, int *, int);
182 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
183 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
184 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
185 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
186 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
187 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
189 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
190 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
192 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
193 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
194 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
195 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
196 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
197 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
198 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
200 /* Support for pluggable optic modules */
201 static bool ixgbe_sfp_probe(struct adapter *);
203 /* Legacy (single vector) interrupt handler */
204 static void ixgbe_legacy_irq(void *);
206 /* The MSI/MSI-X Interrupt handlers */
207 static void ixgbe_msix_que(void *);
208 static void ixgbe_msix_link(void *);
210 /* Deferred interrupt tasklets */
211 static void ixgbe_handle_que(void *, int);
212 static void ixgbe_handle_link(void *, int);
213 static void ixgbe_handle_msf(void *, int);
214 static void ixgbe_handle_mod(void *, int);
215 static void ixgbe_handle_phy(void *, int);
218 /************************************************************************
219 * FreeBSD Device Interface Entry Points
220 ************************************************************************/
221 static device_method_t ix_methods[] = {
222 /* Device interface */
223 DEVMETHOD(device_probe, ixgbe_probe),
224 DEVMETHOD(device_attach, ixgbe_attach),
225 DEVMETHOD(device_detach, ixgbe_detach),
226 DEVMETHOD(device_shutdown, ixgbe_shutdown),
227 DEVMETHOD(device_suspend, ixgbe_suspend),
228 DEVMETHOD(device_resume, ixgbe_resume),
/* SR-IOV virtual-function management hooks */
230 DEVMETHOD(pci_iov_init, ixgbe_init_iov),
231 DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
232 DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
/* Register the "ix" driver on the PCI bus and declare module dependencies */
237 static driver_t ix_driver = {
238 "ix", ix_methods, sizeof(struct adapter),
241 devclass_t ix_devclass;
242 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
244 MODULE_DEPEND(ix, pci, 1, 1, 1);
245 MODULE_DEPEND(ix, ether, 1, 1, 1);
246 MODULE_DEPEND(ix, netmap, 1, 1, 1);
249 * TUNEABLE PARAMETERS:
252 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
255 * AIM: Adaptive Interrupt Moderation
256 * which means that the interrupt rate
257 * is varied over time based on the
258 * traffic for that interrupt vector
260 static int ixgbe_enable_aim = TRUE;
261 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
262 "Enable adaptive interrupt moderation");
/* Upper bound on per-vector interrupt rate when AIM is active */
264 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
265 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
266 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
268 /* How many packets rxeof tries to clean at a time */
269 static int ixgbe_rx_process_limit = 256;
270 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
271 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
273 /* How many packets txeof tries to clean at a time */
274 static int ixgbe_tx_process_limit = 256;
275 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
276 &ixgbe_tx_process_limit, 0,
277 "Maximum number of sent packets to process at a time, -1 means unlimited");
279 /* Flow control setting, default to full */
280 static int ixgbe_flow_control = ixgbe_fc_full;
281 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
282 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
284 /* Advertise Speed, default to 0 (auto) */
285 static int ixgbe_advertise_speed = 0;
286 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
287 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
290 * Smart speed setting, default to on
291 * this only works as a compile option
292 * right now as its during attach, set
293 * this to 'ixgbe_smart_speed_off' to
296 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
299 * MSI-X should be the default for best performance,
300 * but this allows it to be forced off for testing.
302 static int ixgbe_enable_msix = 1;
303 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
304 "Enable MSI-X interrupts");
307 * Number of Queues, can be set to 0,
308 * it then autoconfigures based on the
309 * number of cpus with a max of 8. This
310 * can be overridden manually here.
312 static int ixgbe_num_queues = 0;
313 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
314 "Number of queues to configure, 0 indicates autoconfigure");
317 * Number of TX descriptors per ring,
318 * setting higher than RX as this seems
319 * the better performing choice.
321 static int ixgbe_txd = PERFORM_TXD;
322 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
323 "Number of transmit descriptors per queue");
325 /* Number of RX descriptors per ring */
326 static int ixgbe_rxd = PERFORM_RXD;
327 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
328 "Number of receive descriptors per queue");
331 * Defining this on will allow the use
332 * of unsupported SFP+ modules, note that
333 * doing so you are on your own :)
335 static int allow_unsupported_sfp = FALSE;
336 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
337 &allow_unsupported_sfp, 0,
338 "Allow unsupported SFP modules...use at your own risk");
341 * Not sure if Flow Director is fully baked,
342 * so we'll default to turning it off.
344 static int ixgbe_enable_fdir = 0;
345 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
346 "Enable Flow Director");
348 /* Legacy Transmit (single queue) */
349 static int ixgbe_enable_legacy_tx = 0;
350 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
351 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
353 /* Receive-Side Scaling */
354 static int ixgbe_enable_rss = 1;
355 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
356 "Enable Receive-Side Scaling (RSS)");
358 /* Keep running tab on them for sanity check */
359 static int ixgbe_total_ports;
/*
 * TX dispatch hooks, assigned in ixgbe_setup_interface() to either the
 * legacy (if_start) or multiqueue (if_transmit) implementations.
 */
361 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
362 static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
364 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
366 /************************************************************************
367 * ixgbe_initialize_rss_mapping
368 ************************************************************************/
/*
 * Program the RSS redirection table (RETA/ERETA), hash key registers
 * (RSSRK) and hash-field selection (MRQC).  When the kernel RSS option
 * is enabled, the key and indirection mapping come from the network
 * stack; otherwise a random key and a simple modular mapping are used.
 */
370 ixgbe_initialize_rss_mapping(struct adapter *adapter)
372 struct ixgbe_hw *hw = &adapter->hw;
373 u32 reta = 0, mrqc, rss_key[10];
374 int queue_id, table_size, index_mult;
378 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
379 /* Fetch the configured RSS key */
380 rss_getkey((uint8_t *)&rss_key);
382 /* set up random bits */
383 arc4rand(&rss_key, sizeof(rss_key), 0);
386 /* Set multiplier for RETA setup and table size based on MAC */
389 switch (adapter->hw.mac.type) {
390 case ixgbe_mac_82598EB:
394 case ixgbe_mac_X550EM_x:
395 case ixgbe_mac_X550EM_a:
402 /* Set up the redirection table */
403 for (i = 0, j = 0; i < table_size; i++, j++) {
404 if (j == adapter->num_queues)
407 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
409 * Fetch the RSS bucket id for the given indirection
410 * entry. Cap it at the number of configured buckets
411 * (which is num_queues.)
413 queue_id = rss_get_indirection_to_bucket(i);
414 queue_id = queue_id % adapter->num_queues;
416 queue_id = (j * index_mult);
419 * The low 8 bits are for hash value (n+0);
420 * The next 8 bits are for hash value (n+1), etc.
423 reta = reta | (((uint32_t)queue_id) << 24);
/* Every 4th entry flushes the accumulated 4x8-bit word to hardware;
 * indices beyond the first 128 (32 RETA registers) go to ERETA. */
426 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
428 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
434 /* Now fill our hash function seeds */
435 for (i = 0; i < 10; i++)
436 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
438 /* Perform hash on these packet types */
439 if (adapter->feat_en & IXGBE_FEATURE_RSS)
440 rss_hash_config = rss_gethashconfig();
443 * Disable UDP - IP fragments aren't currently being handled
444 * and so we end up with a mix of 2-tuple and 4-tuple
447 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
448 | RSS_HASHTYPE_RSS_TCP_IPV4
449 | RSS_HASHTYPE_RSS_IPV6
450 | RSS_HASHTYPE_RSS_TCP_IPV6
451 | RSS_HASHTYPE_RSS_IPV6_EX
452 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
/* Translate the stack's hash-type flags into MRQC field-enable bits */
455 mrqc = IXGBE_MRQC_RSSEN;
456 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
457 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
458 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
459 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
460 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
461 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
462 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
463 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
464 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
465 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
466 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
467 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
468 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
469 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
/* Hardware has no field-enable bit for UDP/IPv4-EX; warn instead */
470 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
471 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
473 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
474 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
475 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
476 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
477 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
478 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
479 } /* ixgbe_initialize_rss_mapping */
481 /************************************************************************
482 * ixgbe_initialize_receive_units - Setup receive registers and features.
483 ************************************************************************/
/* Round-up mask so rx_mbuf_sz converts to whole SRRCTL BSIZEPKT units */
484 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
 * Program global RX filtering (FCTRL/HLREG0), each queue's descriptor
 * ring registers (RDBAL/RDBAH/RDLEN/SRRCTL/RDH/RDT), and RX checksum
 * offload (RXCSUM), then hand off to ixgbe_initialize_rss_mapping().
 */
487 ixgbe_initialize_receive_units(struct adapter *adapter)
489 struct rx_ring *rxr = adapter->rx_rings;
490 struct ixgbe_hw *hw = &adapter->hw;
491 struct ifnet *ifp = adapter->ifp;
493 u32 bufsz, fctrl, srrctl, rxcsum;
497 * Make sure receives are disabled while
498 * setting up the descriptor ring
500 ixgbe_disable_rx(hw);
502 /* Enable broadcasts */
503 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
504 fctrl |= IXGBE_FCTRL_BAM;
505 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
506 fctrl |= IXGBE_FCTRL_DPF;
507 fctrl |= IXGBE_FCTRL_PMCF;
509 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
511 /* Set for Jumbo Frames? */
512 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
513 if (ifp->if_mtu > ETHERMTU)
514 hlreg |= IXGBE_HLREG0_JUMBOEN;
516 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
519 /* CRC stripping is conditional in Netmap */
520 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
521 (ifp->if_capenable & IFCAP_NETMAP) &&
523 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
525 #endif /* DEV_NETMAP */
526 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
528 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Buffer size in SRRCTL units (1KB granularity via BSIZEPKT_SHIFT) */
530 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
531 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
533 for (i = 0; i < adapter->num_queues; i++, rxr++) {
534 u64 rdba = rxr->rxdma.dma_paddr;
537 /* Setup the Base and Length of the Rx Descriptor Ring */
538 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
539 (rdba & 0x00000000ffffffffULL));
540 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
541 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
542 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
544 /* Set up the SRRCTL register */
545 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
546 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
547 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
549 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
552 * Set DROP_EN iff we have no flow control and >1 queue.
553 * Note that srrctl was cleared shortly before during reset,
554 * so we do not need to clear the bit, but do it just in case
555 * this code is moved elsewhere.
557 if (adapter->num_queues > 1 &&
558 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
559 srrctl |= IXGBE_SRRCTL_DROP_EN;
561 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
564 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
566 /* Setup the HW Rx Head and Tail Descriptor Pointers */
567 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
568 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
570 /* Set the driver rx tail address */
571 rxr->tail = IXGBE_RDT(rxr->me);
/* 82599 and newer: select which header types trigger packet split */
574 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
575 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
576 | IXGBE_PSRTYPE_UDPHDR
577 | IXGBE_PSRTYPE_IPV4HDR
578 | IXGBE_PSRTYPE_IPV6HDR;
579 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
582 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
584 ixgbe_initialize_rss_mapping(adapter);
586 if (adapter->num_queues > 1) {
587 /* RSS and RX IPP Checksum are mutually exclusive */
588 rxcsum |= IXGBE_RXCSUM_PCSD;
591 if (ifp->if_capenable & IFCAP_RXCSUM)
592 rxcsum |= IXGBE_RXCSUM_PCSD;
594 /* This is useful for calculating UDP/IP fragment checksums */
595 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
596 rxcsum |= IXGBE_RXCSUM_IPPCSE;
598 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
601 } /* ixgbe_initialize_receive_units */
603 /************************************************************************
604 * ixgbe_initialize_transmit_units - Enable transmit units.
605 ************************************************************************/
/*
 * Program each TX queue's descriptor ring registers (TDBAL/TDBAH/TDLEN,
 * TDH/TDT), disable descriptor write-back relaxed ordering, and on
 * 82599-class MACs enable TX DMA and set the multiqueue mode (MTQC).
 */
607 ixgbe_initialize_transmit_units(struct adapter *adapter)
609 struct tx_ring *txr = adapter->tx_rings;
610 struct ixgbe_hw *hw = &adapter->hw;
612 /* Setup the Base and Length of the Tx Descriptor Ring */
613 for (int i = 0; i < adapter->num_queues; i++, txr++) {
614 u64 tdba = txr->txdma.dma_paddr;
618 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
619 (tdba & 0x00000000ffffffffULL));
620 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
621 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
622 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
624 /* Setup the HW Tx Head and Tail descriptor pointers */
625 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
626 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
628 /* Cache the tail address */
629 txr->tail = IXGBE_TDT(j);
631 /* Disable Head Writeback */
633 * Note: for X550 series devices, these registers are actually
634 * prefixed with TPH_ instead of DCA_, but the addresses and
635 * fields remain the same.
637 switch (hw->mac.type) {
638 case ixgbe_mac_82598EB:
639 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
642 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
/* Clear relaxed-ordering on descriptor write-back for correctness */
645 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
646 switch (hw->mac.type) {
647 case ixgbe_mac_82598EB:
648 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
651 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
/* 82599 and newer: enable TX DMA and program multiqueue mode */
657 if (hw->mac.type != ixgbe_mac_82598EB) {
658 u32 dmatxctl, rttdcs;
660 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
661 dmatxctl |= IXGBE_DMATXCTL_TE;
662 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
663 /* Disable arbiter to set MTQC */
664 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
665 rttdcs |= IXGBE_RTTDCS_ARBDIS;
666 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
667 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
668 ixgbe_get_mtqc(adapter->iov_mode));
669 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
670 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
674 } /* ixgbe_initialize_transmit_units */
676 /************************************************************************
677 * ixgbe_attach - Device initialization routine
679 * Called when the driver is being loaded.
680 * Identifies the type of hardware, allocates all resources
681 * and initializes the hardware.
683 * return 0 on success, positive on failure
684 ************************************************************************/
686 ixgbe_attach(device_t dev)
688 struct adapter *adapter;
693 INIT_DEBUGOUT("ixgbe_attach: begin");
695 /* Allocate, clear, and link in our adapter structure */
696 adapter = device_get_softc(dev);
697 adapter->hw.back = adapter;
702 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
704 /* Set up the timer callout */
705 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
707 /* Determine hardware revision */
708 hw->vendor_id = pci_get_vendor(dev);
709 hw->device_id = pci_get_device(dev);
710 hw->revision_id = pci_get_revid(dev);
711 hw->subsystem_vendor_id = pci_get_subvendor(dev);
712 hw->subsystem_device_id = pci_get_subdevice(dev);
715 * Make sure BUSMASTER is set
717 pci_enable_busmaster(dev);
719 /* Do base PCI setup - map BAR0 */
720 if (ixgbe_allocate_pci_resources(adapter)) {
721 device_printf(dev, "Allocation of PCI resources failed\n");
726 /* let hardware know driver is loaded */
727 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
728 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
729 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
732 * Initialize the shared code
734 if (ixgbe_init_shared_code(hw)) {
735 device_printf(dev, "Unable to initialize the shared code\n");
740 if (hw->mbx.ops.init_params)
741 hw->mbx.ops.init_params(hw);
743 hw->allow_unsupported_sfp = allow_unsupported_sfp;
745 /* Pick up the 82599 settings */
746 if (hw->mac.type != ixgbe_mac_82598EB) {
747 hw->phy.smart_speed = ixgbe_smart_speed;
748 adapter->num_segs = IXGBE_82599_SCATTER;
750 adapter->num_segs = IXGBE_82598_SCATTER;
752 ixgbe_init_device_features(adapter);
754 if (ixgbe_configure_interrupts(adapter)) {
759 /* Allocate multicast array memory. */
760 adapter->mta = malloc(sizeof(*adapter->mta) *
761 MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
762 if (adapter->mta == NULL) {
763 device_printf(dev, "Can not allocate multicast setup array\n");
768 /* Enable WoL (if supported) */
769 ixgbe_check_wol_support(adapter);
771 /* Register for VLAN events */
772 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
773 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
774 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
775 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
777 /* Verify adapter fan is still functional (if applicable) */
778 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
779 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
780 ixgbe_check_fan_failure(adapter, esdp, FALSE);
783 /* Ensure SW/FW semaphore is free */
784 ixgbe_init_swfw_semaphore(hw);
786 /* Enable EEE power saving */
787 if (adapter->feat_en & IXGBE_FEATURE_EEE)
788 hw->mac.ops.setup_eee(hw, TRUE);
790 /* Set an initial default flow control value */
791 hw->fc.requested_mode = ixgbe_flow_control;
793 /* Sysctls for limiting the amount of work done in the taskqueues */
794 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
795 "max number of rx packets to process",
796 &adapter->rx_process_limit, ixgbe_rx_process_limit);
798 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
799 "max number of tx packets to process",
800 &adapter->tx_process_limit, ixgbe_tx_process_limit);
802 /* Do descriptor calc and sanity checks */
803 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
804 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
805 device_printf(dev, "TXD config issue, using default!\n");
806 adapter->num_tx_desc = DEFAULT_TXD;
808 adapter->num_tx_desc = ixgbe_txd;
811 * With many RX rings it is easy to exceed the
812 * system mbuf allocation. Tuning nmbclusters
813 * can alleviate this.
815 if (nmbclusters > 0) {
817 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
818 if (s > nmbclusters) {
819 device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
820 ixgbe_rxd = DEFAULT_RXD;
824 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
825 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
826 device_printf(dev, "RXD config issue, using default!\n");
827 adapter->num_rx_desc = DEFAULT_RXD;
829 adapter->num_rx_desc = ixgbe_rxd;
831 /* Allocate our TX/RX Queues */
832 if (ixgbe_allocate_queues(adapter)) {
/* First hardware reset: let the PHY layer reset the part if it is
 * over-temperature, and tolerate an absent SFP module (probed later). */
837 hw->phy.reset_if_overtemp = TRUE;
838 error = ixgbe_reset_hw(hw);
839 hw->phy.reset_if_overtemp = FALSE;
840 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
842 * No optics in this port, set up
843 * so the timer routine will probe
844 * for later insertion.
846 adapter->sfp_probe = TRUE;
847 error = IXGBE_SUCCESS;
848 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
849 device_printf(dev, "Unsupported SFP+ module detected!\n");
853 device_printf(dev, "Hardware initialization failed\n");
858 /* Make sure we have a good EEPROM before we read from it */
859 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
860 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
865 /* Setup OS specific network interface */
866 if (ixgbe_setup_interface(dev, adapter) != 0)
/* Hook up interrupts: MSI-X per queue when enabled, else legacy/MSI */
869 if (adapter->feat_en & IXGBE_FEATURE_MSIX)
870 error = ixgbe_allocate_msix(adapter);
872 error = ixgbe_allocate_legacy(adapter);
876 error = ixgbe_start_hw(hw);
878 case IXGBE_ERR_EEPROM_VERSION:
879 device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
881 case IXGBE_ERR_SFP_NOT_SUPPORTED:
882 device_printf(dev, "Unsupported SFP+ Module\n");
885 case IXGBE_ERR_SFP_NOT_PRESENT:
886 device_printf(dev, "No SFP+ Module found\n");
892 /* Enable the optics for 82599 SFP+ fiber */
893 ixgbe_enable_tx_laser(hw);
895 /* Enable power to the phy. */
896 ixgbe_set_phy_power(hw, TRUE);
898 /* Initialize statistics */
899 ixgbe_update_stats_counters(adapter);
901 /* Check PCIE slot type/speed/width */
902 ixgbe_get_slot_info(adapter);
905 * Do time init and sysctl init here, but
906 * only on the first port of a bypass adapter.
908 ixgbe_bypass_init(adapter);
910 /* Set an initial dmac value */
912 /* Set initial advertised speeds (if applicable) */
913 adapter->advertise = ixgbe_get_advertise(adapter);
915 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
916 ixgbe_define_iov_schemas(dev, &error);
919 ixgbe_add_device_sysctls(adapter);
920 ixgbe_add_hw_stats(adapter);
923 adapter->init_locked = ixgbe_init_locked;
924 adapter->stop_locked = ixgbe_stop;
926 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
927 ixgbe_netmap_attach(adapter);
929 INIT_DEBUGOUT("ixgbe_attach: end");
/* --- error unwind paths: release whatever was acquired above --- */
934 ixgbe_free_transmit_structures(adapter);
935 ixgbe_free_receive_structures(adapter);
936 free(adapter->queues, M_DEVBUF);
938 if (adapter->ifp != NULL)
939 if_free(adapter->ifp);
/* Clear DRV_LOAD so firmware knows the driver is no longer resident */
940 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
941 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
942 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
943 ixgbe_free_pci_resources(adapter);
944 free(adapter->mta, M_IXGBE);
945 IXGBE_CORE_LOCK_DESTROY(adapter);
950 /************************************************************************
951 * ixgbe_check_wol_support
953 * Checks whether the adapter's ports are capable of
954 * Wake On LAN by reading the adapter's NVM.
956 * Sets each port's hw->wol_enabled value depending
957 * on the value read here.
958 ************************************************************************/
960 ixgbe_check_wol_support(struct adapter *adapter)
/*
 * NOTE(review): this excerpt elides several lines of the function
 * (return type, braces, the dev_caps declaration and the tail of the
 * if-condition); comments below describe only the visible code.
 */
962 	struct ixgbe_hw *hw = &adapter->hw;
965 	/* Find out WoL support for port */
966 	adapter->wol_support = hw->wol_enabled = 0;
	/* Query the NVM-derived device capability word for this adapter */
967 	ixgbe_get_device_caps(hw, &dev_caps);
968 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
969 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	    /* the second clause's per-port check is elided here — confirm
	     * against the full file */
971 		adapter->wol_support = hw->wol_enabled = 1;
973 	/* Save initial wake up filter configuration */
974 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
977 } /* ixgbe_check_wol_support */
979 /************************************************************************
980 * ixgbe_setup_interface
982 * Setup networking device structure and register an interface.
983 ************************************************************************/
985 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
/*
 * Allocate and populate the ifnet structure, register the driver's
 * entry points with the network stack, and set up the ifmedia list.
 * NOTE(review): lines are elided throughout this excerpt (braces,
 * #endif directives, error returns); comments cover visible code only.
 */
989 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
991 	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	/* allocation-failure branch (elided) reports and presumably bails */
993 		device_printf(dev, "can not allocate ifnet structure\n");
996 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
997 	ifp->if_baudrate = IF_Gbps(10);
998 	ifp->if_init = ixgbe_init;
999 	ifp->if_softc = adapter;
1000 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1001 	ifp->if_ioctl = ixgbe_ioctl;
1002 #if __FreeBSD_version >= 1100036
1003 	if_setgetcounterfn(ifp, ixgbe_get_counter);
1005 #if __FreeBSD_version >= 1100045
1006 	/* TSO parameters */
1007 	ifp->if_hw_tsomax = 65518;
1008 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1009 	ifp->if_hw_tsomaxsegsize = 2048;
	/* Legacy (if_start) vs. multiqueue (if_transmit) transmit paths:
	 * the feature flag also selects which function pointers the
	 * file-scope ixgbe_start_locked/ixgbe_ring_empty hooks use. */
1011 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1012 		ifp->if_start = ixgbe_legacy_start;
1013 		IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1014 		ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
1015 		IFQ_SET_READY(&ifp->if_snd);
1016 		ixgbe_start_locked = ixgbe_legacy_start_locked;
1017 		ixgbe_ring_empty = ixgbe_legacy_ring_empty;
1019 		ifp->if_transmit = ixgbe_mq_start;
1020 		ifp->if_qflush = ixgbe_qflush;
1021 		ixgbe_start_locked = ixgbe_mq_start_locked;
1022 		ixgbe_ring_empty = drbr_empty;
1025 	ether_ifattach(ifp, adapter->hw.mac.addr);
1027 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1030 	 * Tell the upper layer(s) we support long frames.
1032 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1034 	/* Set capability flags */
1035 	ifp->if_capabilities |= IFCAP_HWCSUM
	/* additional capability bits on elided lines between these two */
1039 			     | IFCAP_VLAN_HWTAGGING
1046 	/* Enable the above capabilities by default */
1047 	ifp->if_capenable = ifp->if_capabilities;
1050 	 * Don't turn this on by default, if vlans are
1051 	 * created on another pseudo device (eg. lagg)
1052 	 * then vlan events are not passed thru, breaking
1053 	 * operation, but with HW FILTER off it works. If
1054 	 * using vlans directly on the ixgbe driver you can
1055 	 * enable this and get full hardware tag filtering.
1057 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1060 	 * Specify the media types supported by this adapter and register
1061 	 * callbacks to update media and link information
1063 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1064 	    ixgbe_media_status);
1066 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1067 	ixgbe_add_media_types(adapter);
1069 	/* Set autoselect media by default */
1070 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1073 } /* ixgbe_setup_interface */
1075 #if __FreeBSD_version >= 1100036
1076 /************************************************************************
1078 ************************************************************************/
1080 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
/*
 * if_get_counter callback: report driver-maintained values for the
 * requested interface statistic; anything not handled here falls
 * through to the stack's default accounting.
 * NOTE(review): several return/break lines are elided in this excerpt.
 */
1082 	struct adapter *adapter;
1083 	struct tx_ring *txr;
1086 	adapter = if_getsoftc(ifp);
1089 	case IFCOUNTER_IPACKETS:
1090 		return (adapter->ipackets);
1091 	case IFCOUNTER_OPACKETS:
1092 		return (adapter->opackets);
1093 	case IFCOUNTER_IBYTES:
1094 		return (adapter->ibytes);
1095 	case IFCOUNTER_OBYTES:
1096 		return (adapter->obytes);
1097 	case IFCOUNTER_IMCASTS:
1098 		return (adapter->imcasts);
1099 	case IFCOUNTER_OMCASTS:
1100 		return (adapter->omcasts);
1101 	case IFCOUNTER_COLLISIONS:
	/* collision return value is on an elided line — TODO confirm */
1103 	case IFCOUNTER_IQDROPS:
1104 		return (adapter->iqdrops);
1105 	case IFCOUNTER_OQDROPS:
	/* sum software buf_ring drops across every TX ring */
1107 		txr = adapter->tx_rings;
1108 		for (int i = 0; i < adapter->num_queues; i++, txr++)
1109 			rv += txr->br->br_drops;
1111 	case IFCOUNTER_IERRORS:
1112 		return (adapter->ierrors);
1114 		return (if_get_counter_default(ifp, cnt));
1116 } /* ixgbe_get_counter */
1119 /************************************************************************
1120 * ixgbe_add_media_types
1121 ************************************************************************/
1123 ixgbe_add_media_types(struct adapter *adapter)
/*
 * Register one ifmedia entry for every physical-layer type the PHY
 * reports as supported, plus an autoselect entry at the end.  When the
 * kernel predates IFM_ETH_XTYPE, backplane KR/KX types are mapped to
 * the nearest legacy media define (with a console note).
 */
1125 	struct ixgbe_hw *hw = &adapter->hw;
1126 	device_t dev = adapter->dev;
1129 	layer = adapter->phy_layer;
1131 	/* Media types with matching FreeBSD media defines */
1132 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1133 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1134 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1135 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1136 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1137 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1138 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1139 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1141 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1142 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1143 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
	/* multispeed fiber modules can also run at 1G, so add the 1G type */
1146 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1147 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1148 		if (hw->phy.multispeed_fiber)
1149 			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1152 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1153 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1154 		if (hw->phy.multispeed_fiber)
1155 			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1157 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1158 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1159 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1160 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1162 #ifdef IFM_ETH_XTYPE
	/* Kernel has native backplane media defines — use them directly */
1163 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1164 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1165 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1166 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1167 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1168 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1169 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1170 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
	/* #else branch (elided directive): map backplane types to legacy
	 * media defines and tell the operator about the substitution */
1172 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1173 		device_printf(dev, "Media supported: 10GbaseKR\n");
1174 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1175 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1177 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1178 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1179 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1180 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1182 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1183 		device_printf(dev, "Media supported: 1000baseKX\n");
1184 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1185 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1187 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1188 		device_printf(dev, "Media supported: 2500baseKX\n");
1189 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1190 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	/* 1000baseBX has no ifmedia define here; only announced */
1193 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1194 		device_printf(dev, "Media supported: 1000baseBX\n");
1196 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1197 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1199 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1202 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1203 } /* ixgbe_add_media_types */
1205 /************************************************************************
1207 ************************************************************************/
1209 ixgbe_is_sfp(struct ixgbe_hw *hw)
/*
 * Predicate: does this MAC/PHY combination use a pluggable optics
 * module (SFP/QSFP)?  The actual return statements are elided in this
 * excerpt; only the dispatch structure is visible.
 */
1211 	switch (hw->mac.type) {
1212 	case ixgbe_mac_82598EB:
	/* 82598: only the NL-PHY variant is module-based */
1213 		if (hw->phy.type == ixgbe_phy_nl)
1216 	case ixgbe_mac_82599EB:
	/* 82599: decide by reported media type (fiber or QSFP fiber) */
1217 		switch (hw->mac.ops.get_media_type(hw)) {
1218 		case ixgbe_media_type_fiber:
1219 		case ixgbe_media_type_fiber_qsfp:
1224 	case ixgbe_mac_X550EM_x:
1225 	case ixgbe_mac_X550EM_a:
1226 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1232 } /* ixgbe_is_sfp */
1234 /************************************************************************
1236 ************************************************************************/
1238 ixgbe_config_link(struct adapter *adapter)
/*
 * Bring the link up.  SFP-based ports defer module identification and
 * multispeed handling to taskqueue work; copper/backplane ports query
 * capabilities and call setup_link directly.
 * NOTE(review): braces/else lines are elided in this excerpt.
 */
1240 	struct ixgbe_hw *hw = &adapter->hw;
1241 	u32 autoneg, err = 0;
1242 	bool sfp, negotiate;
1244 	sfp = ixgbe_is_sfp(hw);
	/* SFP path (enclosing if is elided): kick the module/MSF tasks */
1247 		if (hw->phy.multispeed_fiber) {
1248 			hw->mac.ops.setup_sfp(hw);
1249 			ixgbe_enable_tx_laser(hw);
1250 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1252 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
	/* Non-SFP path: read current link state, then negotiate */
1254 		if (hw->mac.ops.check_link)
1255 			err = ixgbe_check_link(hw, &adapter->link_speed,
1256 			    &adapter->link_up, FALSE);
1259 		autoneg = hw->phy.autoneg_advertised;
	/* nothing advertised yet — fall back to hardware capabilities */
1260 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1261 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1265 		if (hw->mac.ops.setup_link)
1266 			err = hw->mac.ops.setup_link(hw, autoneg,
1272 } /* ixgbe_config_link */
1274 /************************************************************************
1275 * ixgbe_update_stats_counters - Update board statistics counters.
1276 ************************************************************************/
1278 ixgbe_update_stats_counters(struct adapter *adapter)
/*
 * Fold the MAC's clear-on-read statistics registers into the driver's
 * accumulated totals, then publish the OS-facing interface counters.
 */
1280 	struct ixgbe_hw *hw = &adapter->hw;
1281 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1282 	u32 missed_rx = 0, bprc, lxon, lxoff, total;
1283 	u64 total_missed_rx = 0;
1285 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1286 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1287 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1288 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1289 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1291 	for (int i = 0; i < 16; i++) {
1292 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1293 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1294 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1296 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1297 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1298 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	/*
	 * NOTE(review): no visible code in this excerpt updates missed_rx
	 * or total_missed_rx after their zero-initialization, yet they are
	 * subtracted/published below — confirm the missed-packet
	 * accumulation exists in the full file.
	 */
1300 	/* Hardware workaround, gprc counts missed packets */
1301 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1302 	stats->gprc -= missed_rx;
	/* 64-bit octet counters are split low/high on 82599+; 82598 (else
	 * branch, elided) only has the high register */
1304 	if (hw->mac.type != ixgbe_mac_82598EB) {
1305 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1306 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1307 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1308 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1309 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1310 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1311 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1312 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1314 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1315 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1316 		/* 82598 only has a counter in the high register */
1317 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1318 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1319 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1323 	 * Workaround: mprc hardware is incorrectly counting
1324 	 * broadcasts, so for now we subtract those.
1326 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1327 	stats->bprc += bprc;
1328 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1329 	if (hw->mac.type == ixgbe_mac_82598EB)
1330 		stats->mprc -= bprc;
1332 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1333 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1334 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1335 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1336 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1337 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1339 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1340 	stats->lxontxc += lxon;
1341 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1342 	stats->lxofftxc += lxoff;
	/* flow-control pause frames are not "real" traffic: back them out
	 * of the good-packet/good-octet TX totals below */
1343 	total = lxon + lxoff;
1345 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1346 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1347 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1348 	stats->gptc -= total;
1349 	stats->mptc -= total;
1350 	stats->ptc64 -= total;
1351 	stats->gotc -= total * ETHER_MIN_LEN;
1353 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1354 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1355 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1356 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1357 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1358 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1359 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1360 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1361 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1362 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1363 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1364 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1365 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1366 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1367 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1368 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1369 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1370 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1371 	/* Only read FCOE on 82599 */
1372 	if (hw->mac.type != ixgbe_mac_82598EB) {
1373 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1374 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1375 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1376 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1377 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1380 	/* Fill out the OS statistics structure */
1381 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1382 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1383 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1384 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1385 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1386 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1387 	IXGBE_SET_COLLISIONS(adapter, 0);
1388 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1389 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1390 } /* ixgbe_update_stats_counters */
1392 /************************************************************************
1393 * ixgbe_add_hw_stats
1395 * Add sysctl variables, one per statistic, to the system.
1396 ************************************************************************/
1398 ixgbe_add_hw_stats(struct adapter *adapter)
1400 device_t dev = adapter->dev;
1401 struct tx_ring *txr = adapter->tx_rings;
1402 struct rx_ring *rxr = adapter->rx_rings;
1403 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1404 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1405 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1406 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1407 struct sysctl_oid *stat_node, *queue_node;
1408 struct sysctl_oid_list *stat_list, *queue_list;
1410 #define QUEUE_NAME_LEN 32
1411 char namebuf[QUEUE_NAME_LEN];
1413 /* Driver Statistics */
1414 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1415 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1416 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1417 CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
1418 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1419 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1420 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1421 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1423 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1424 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1425 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1426 CTLFLAG_RD, NULL, "Queue Name");
1427 queue_list = SYSCTL_CHILDREN(queue_node);
1429 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1430 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
1431 sizeof(&adapter->queues[i]),
1432 ixgbe_sysctl_interrupt_rate_handler, "IU",
1434 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1435 CTLFLAG_RD, &(adapter->queues[i].irqs),
1436 "irqs on this queue");
1437 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1438 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1439 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1440 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1441 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1442 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1443 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1444 CTLFLAG_RD, &txr->tso_tx, "TSO");
1445 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
1446 CTLFLAG_RD, &txr->no_tx_dma_setup,
1447 "Driver tx dma failure in xmit");
1448 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
1449 CTLFLAG_RD, &txr->no_desc_avail,
1450 "Queue No Descriptor Available");
1451 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1452 CTLFLAG_RD, &txr->total_packets,
1453 "Queue Packets Transmitted");
1454 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
1455 CTLFLAG_RD, &txr->br->br_drops,
1456 "Packets dropped in buf_ring");
1459 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1460 struct lro_ctrl *lro = &rxr->lro;
1462 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1463 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1464 CTLFLAG_RD, NULL, "Queue Name");
1465 queue_list = SYSCTL_CHILDREN(queue_node);
1467 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1468 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1469 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1470 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1471 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1472 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1473 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1474 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1475 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1476 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1477 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1478 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1479 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1480 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1481 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
1482 CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
1483 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
1484 CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
1487 /* MAC stats get their own sub node */
1489 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1490 CTLFLAG_RD, NULL, "MAC Statistics");
1491 stat_list = SYSCTL_CHILDREN(stat_node);
1493 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1494 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1495 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1496 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1497 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1498 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1499 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1500 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1501 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1502 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1503 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1504 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1505 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1506 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1507 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1508 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1510 /* Flow Control stats */
1511 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1512 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1513 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1514 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1515 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1516 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1517 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1518 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1520 /* Packet Reception Stats */
1521 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1522 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1523 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1524 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1525 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1526 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1527 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1528 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1529 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1530 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1531 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1532 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1533 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1534 CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1535 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1536 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1537 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1538 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1539 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1540 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1541 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1542 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1543 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1544 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1545 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1546 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1547 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1548 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1549 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1550 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1551 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1552 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1553 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1554 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1555 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1556 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1557 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1558 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1560 /* Packet Transmission Stats */
1561 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1562 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1563 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1564 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1565 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1566 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1567 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1568 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1569 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1570 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1571 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1572 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1573 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1574 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1575 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1576 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1577 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1578 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1579 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1580 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1581 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1582 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1583 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1584 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1585 } /* ixgbe_add_hw_stats */
1587 /************************************************************************
1588 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1590 * Retrieves the TDH value from the hardware
1591 ************************************************************************/
1593 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
/*
 * Read the hardware Transmit Descriptor Head for the ring passed in
 * oid_arg1 and hand it to the sysctl machinery (read-only: newptr is
 * rejected below).  Declarations/returns are elided in this excerpt.
 */
1595 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1602 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1603 	error = sysctl_handle_int(oidp, &val, 0, req);
1604 	if (error || !req->newptr)
1608 } /* ixgbe_sysctl_tdh_handler */
1610 /************************************************************************
1611 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1613 * Retrieves the TDT value from the hardware
1614 ************************************************************************/
1616 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
/*
 * Report the hardware Transmit Descriptor Tail for the ring in
 * oid_arg1.  Declarations/returns are elided in this excerpt.
 */
1618 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1625 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1626 	error = sysctl_handle_int(oidp, &val, 0, req);
1627 	if (error || !req->newptr)
1631 } /* ixgbe_sysctl_tdt_handler */
1633 /************************************************************************
1634 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1636 * Retrieves the RDH value from the hardware
1637 ************************************************************************/
1639 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
/*
 * Report the hardware Receive Descriptor Head for the ring in
 * oid_arg1.  Declarations/returns are elided in this excerpt.
 */
1641 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1648 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1649 	error = sysctl_handle_int(oidp, &val, 0, req);
1650 	if (error || !req->newptr)
1654 } /* ixgbe_sysctl_rdh_handler */
1656 /************************************************************************
1657 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1659 * Retrieves the RDT value from the hardware
1660 ************************************************************************/
1662 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
/*
 * Report the hardware Receive Descriptor Tail for the ring in
 * oid_arg1.  Declarations/returns are elided in this excerpt.
 */
1664 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1671 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1672 	error = sysctl_handle_int(oidp, &val, 0, req);
1673 	if (error || !req->newptr)
1677 } /* ixgbe_sysctl_rdt_handler */
1679 /************************************************************************
1680 * ixgbe_register_vlan
1682 * Run via vlan config EVENT, it enables us to use the
1683 * HW Filter table since we can get the vlan id. This
1684 * just creates the entry in the soft version of the
1685 * VFTA, init will repopulate the real table.
1686 ************************************************************************/
1688 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
/*
 * VLAN config event handler: record the tag in the soft VFTA shadow
 * (index selects the 32-bit word; the bit computation is on an elided
 * line, presumably vtag & 0x1F — confirm) and reprogram the hardware.
 */
1690 	struct adapter *adapter = ifp->if_softc;
1693 	if (ifp->if_softc != arg) /* Not our event */
1696 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1699 	IXGBE_CORE_LOCK(adapter);
1700 	index = (vtag >> 5) & 0x7F;
1702 	adapter->shadow_vfta[index] |= (1 << bit);
1703 	++adapter->num_vlans;
	/* push the updated shadow table into the hardware */
1704 	ixgbe_setup_vlan_hw_support(adapter);
1705 	IXGBE_CORE_UNLOCK(adapter);
1706 } /* ixgbe_register_vlan */
1708 /************************************************************************
1709 * ixgbe_unregister_vlan
1711 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1712 ************************************************************************/
1714 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
/*
 * VLAN unconfig event handler: clear the tag's bit from the soft VFTA
 * shadow (bit computation on an elided line, mirroring register_vlan)
 * and re-init the hardware filter state.
 */
1716 	struct adapter *adapter = ifp->if_softc;
1719 	if (ifp->if_softc != arg)
1722 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1725 	IXGBE_CORE_LOCK(adapter);
1726 	index = (vtag >> 5) & 0x7F;
1728 	adapter->shadow_vfta[index] &= ~(1 << bit);
1729 	--adapter->num_vlans;
1730 	/* Re-init to load the changes */
1731 	ixgbe_setup_vlan_hw_support(adapter);
1732 	IXGBE_CORE_UNLOCK(adapter);
1733 } /* ixgbe_unregister_vlan */
1735 /************************************************************************
1736 * ixgbe_setup_vlan_hw_support
1737 ************************************************************************/
1739 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
/*
 * Program hardware VLAN tag stripping and (optionally) the VLAN filter
 * table from the driver's shadow VFTA after a (soft) reset.
 */
1741 	struct ifnet *ifp = adapter->ifp;
1742 	struct ixgbe_hw *hw = &adapter->hw;
1743 	struct rx_ring *rxr;
1749 	 * We get here thru init_locked, meaning
1750 	 * a soft reset, this has already cleared
1751 	 * the VFTA and other state, so if there
1752 	 * have been no vlan's registered do nothing.
1754 	if (adapter->num_vlans == 0)
1757 	/* Setup the queues for vlans */
1758 	for (i = 0; i < adapter->num_queues; i++) {
1759 		rxr = &adapter->rx_rings[i];
1760 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
1761 		if (hw->mac.type != ixgbe_mac_82598EB) {
1762 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1763 			ctrl |= IXGBE_RXDCTL_VME;
1764 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
	/* remember per-ring that the hardware strips the tag for us */
1766 		rxr->vtag_strip = TRUE;
	/* hardware VLAN filtering disabled: nothing more to program */
1769 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1772 	 * A soft reset zero's out the VFTA, so
1773 	 * we need to repopulate it now.
1775 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1776 		if (adapter->shadow_vfta[i] != 0)
1777 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1778 			    adapter->shadow_vfta[i]);
1780 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1781 	/* Enable the Filter Table if enabled */
1782 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1783 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1784 		ctrl |= IXGBE_VLNCTRL_VFE;
	/* on 82598 the strip enable lives globally in VLNCTRL instead */
1786 	if (hw->mac.type == ixgbe_mac_82598EB)
1787 		ctrl |= IXGBE_VLNCTRL_VME;
1788 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1789 } /* ixgbe_setup_vlan_hw_support */
1791 /************************************************************************
1792 * ixgbe_get_slot_info
1794 * Get the width and transaction speed of
1795 * the slot this adapter is plugged into.
1796 ************************************************************************/
1798 ixgbe_get_slot_info(struct adapter *adapter)
/*
 * Determine the PCIe speed/width of the slot this adapter occupies and
 * warn if the slot cannot feed the NIC at full rate.  Devices behind an
 * internal PCIe bridge must walk up the device tree to find the real
 * slot.  NOTE(review): several declarations, labels, and returns are
 * elided in this excerpt.
 */
1800 	device_t dev = adapter->dev;
1801 	struct ixgbe_hw *hw = &adapter->hw;
1804 	int bus_info_valid = TRUE;
1806 	/* Some devices are behind an internal bridge */
1807 	switch (hw->device_id) {
1808 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1809 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1810 		goto get_parent_info;
	/* directly-attached devices: read bus info from config space */
1815 	ixgbe_get_bus_info(hw);
1818 	 * Some devices don't use PCI-E, but there is no need
1819 	 * to display "Unknown" for bus speed and width.
1821 	switch (hw->mac.type) {
1822 	case ixgbe_mac_X550EM_x:
1823 	case ixgbe_mac_X550EM_a:
1831 	 * For the Quad port adapter we need to parse back
1832 	 * up the PCI tree to find the speed of the expansion
1833 	 * slot into which this adapter is plugged. A bit more work.
1835 	dev = device_get_parent(device_get_parent(dev));
1837 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1838 	    pci_get_slot(dev), pci_get_function(dev));
1840 	dev = device_get_parent(device_get_parent(dev));
1842 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1843 	    pci_get_slot(dev), pci_get_function(dev));
1845 	/* Now get the PCI Express Capabilities offset */
1846 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1848 		 * Hmm...can't get PCI-Express capabilities.
1849 		 * Falling back to default method.
1851 		bus_info_valid = FALSE;
1852 		ixgbe_get_bus_info(hw);
1855 	/* ...and read the Link Status Register */
1856 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1857 	ixgbe_set_pci_config_data_generic(hw, link);
1860 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1861 	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
1862 	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
1863 	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
1865 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1866 	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1867 	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1870 	if (bus_info_valid) {
	/* ordinary adapters need at least x8 Gen1 or x4 Gen2 */
1871 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1872 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1873 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
1874 			device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
1875 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
	/* the quad-port SFP variant needs a x8 Gen3 slot */
1877 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1878 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1879 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
1880 			device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
1881 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1884 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1887 } /* ixgbe_get_slot_info */
1889 /************************************************************************
1890 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
 *
 * Unmask the MSI-X interrupt for the given queue vector: a single EIMS
 * write on 82598, or the low/high half of EIMS_EX on newer MACs.
1891 ************************************************************************/
1893 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1895 struct ixgbe_hw *hw = &adapter->hw;
 /* NOTE(review): "1 << vector" shifts a 32-bit int BEFORE the u64
  * cast — undefined behavior for vector >= 32. The EIMS_EX(1) write
  * below expects the cast to happen first: should be (u64)1 << vector.
  * Flagged rather than changed; fix alongside ixgbe_disable_queue. */
1896 u64 queue = (u64)(1 << vector);
1899 if (hw->mac.type == ixgbe_mac_82598EB) {
 /* 82598: one 32-bit EIMS register, restricted to RTX queue bits. */
1900 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1901 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1903 mask = (queue & 0xFFFFFFFF);
1905 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1906 mask = (queue >> 32);
1908 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1910 } /* ixgbe_enable_queue */
1912 /************************************************************************
1913 * ixgbe_disable_queue
 *
 * Mask the MSI-X interrupt for the given queue vector: a single EIMC
 * write on 82598, or the low/high half of EIMC_EX on newer MACs.
1914 ************************************************************************/
1916 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1918 struct ixgbe_hw *hw = &adapter->hw;
 /* NOTE(review): "1 << vector" shifts a 32-bit int BEFORE the u64
  * cast — undefined behavior for vector >= 32; the intended form is
  * (u64)1 << vector so the EIMC_EX(1) high-half write can ever be
  * non-zero. Flagged rather than changed here. */
1919 u64 queue = (u64)(1 << vector);
1922 if (hw->mac.type == ixgbe_mac_82598EB) {
 /* 82598: one 32-bit EIMC register, restricted to RTX queue bits. */
1923 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1924 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1926 mask = (queue & 0xFFFFFFFF);
1928 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1929 mask = (queue >> 32);
1931 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1933 } /* ixgbe_disable_queue */
1935 /************************************************************************
1936 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 * Per-queue interrupt handler: drains RX and TX work for this queue
 * and, when enabled, computes an adaptive interrupt-moderation (AIM)
 * EITR value from the average frame size of the last interval.
1937 ************************************************************************/
1939 ixgbe_msix_que(void *arg)
1941 struct ix_queue *que = arg;
1942 struct adapter *adapter = que->adapter;
1943 struct ifnet *ifp = adapter->ifp;
1944 struct tx_ring *txr = que->txr;
1945 struct rx_ring *rxr = que->rxr;
1950 /* Protect against spurious interrupts */
1951 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 /* Auto-mask this vector while servicing; it is re-enabled at the
  * bottom, or deferred to the taskqueue when more work remains. */
1954 ixgbe_disable_queue(adapter, que->msix);
1957 more = ixgbe_rxeof(que);
1961 if (!ixgbe_ring_empty(ifp, txr->br))
1962 ixgbe_start_locked(ifp, txr);
1963 IXGBE_TX_UNLOCK(txr);
1967 if (adapter->enable_aim == FALSE)
1970 * Do Adaptive Interrupt Moderation:
1971 * - Write out last calculated setting
1972 * - Calculate based on average size over
1973 * the last interval.
1975 if (que->eitr_setting)
1976 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
1979 que->eitr_setting = 0;
1981 /* Idle, do nothing */
1982 if ((txr->bytes == 0) && (rxr->bytes == 0))
 /* newitr approximates average frame size (bytes/packet) over the
  * interval; larger frames -> longer moderation interval. */
1985 if ((txr->bytes) && (txr->packets))
1986 newitr = txr->bytes/txr->packets;
1987 if ((rxr->bytes) && (rxr->packets))
1988 newitr = max(newitr, (rxr->bytes / rxr->packets));
1989 newitr += 24; /* account for hardware frame, crc */
1991 /* set an upper boundary */
1992 newitr = min(newitr, 3000);
1994 /* Be nice to the mid range */
1995 if ((newitr > 300) && (newitr < 1200))
1996 newitr = (newitr / 3);
1998 newitr = (newitr / 2);
 /* 82598 wants the interval mirrored into the upper 16 bits of
  * EITR; newer MACs instead need the counter write-disable bit. */
2000 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2001 newitr |= newitr << 16;
2003 newitr |= IXGBE_EITR_CNT_WDIS;
2005 /* save for next interrupt */
2006 que->eitr_setting = newitr;
 /* More work pending: hand off to the taskqueue rather than
  * unmasking the interrupt immediately. */
2016 taskqueue_enqueue(que->tq, &que->que_task);
2018 ixgbe_enable_queue(adapter, que->msix);
2021 } /* ixgbe_msix_que */
2023 /************************************************************************
2024 * ixgbe_media_status - Media Ioctl callback
2026 * Called whenever the user queries the status of
2027 * the interface using ifconfig.
 *
 * Reports link validity/activity, maps the adapter's PHY layer plus the
 * negotiated link speed onto an ifmedia subtype, and appends the current
 * flow-control pause flags.
 * NOTE(review): the extraction elided the "break;" lines between the
 * switch cases shown below; annotations describe visible code only.
2028 ************************************************************************/
2030 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2032 struct adapter *adapter = ifp->if_softc;
2033 struct ixgbe_hw *hw = &adapter->hw;
2036 INIT_DEBUGOUT("ixgbe_media_status: begin");
2037 IXGBE_CORE_LOCK(adapter);
 /* Refresh link state before reporting it. */
2038 ixgbe_update_link_status(adapter);
2040 ifmr->ifm_status = IFM_AVALID;
2041 ifmr->ifm_active = IFM_ETHER;
 /* No link: report media as merely valid and return early. */
2043 if (!adapter->link_active) {
2044 IXGBE_CORE_UNLOCK(adapter);
2048 ifmr->ifm_status |= IFM_ACTIVE;
2049 layer = adapter->phy_layer;
 /* BASE-T copper variants */
2051 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2052 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2053 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2054 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2055 switch (adapter->link_speed) {
2056 case IXGBE_LINK_SPEED_10GB_FULL:
2057 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2059 case IXGBE_LINK_SPEED_1GB_FULL:
2060 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2062 case IXGBE_LINK_SPEED_100_FULL:
2063 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2065 case IXGBE_LINK_SPEED_10_FULL:
2066 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
 /* SFP+ direct-attach copper (twinax) */
2069 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2070 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2071 switch (adapter->link_speed) {
2072 case IXGBE_LINK_SPEED_10GB_FULL:
2073 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
 /* Long-reach fiber */
2076 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2077 switch (adapter->link_speed) {
2078 case IXGBE_LINK_SPEED_10GB_FULL:
2079 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2081 case IXGBE_LINK_SPEED_1GB_FULL:
2082 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2085 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2086 switch (adapter->link_speed) {
2087 case IXGBE_LINK_SPEED_10GB_FULL:
2088 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2090 case IXGBE_LINK_SPEED_1GB_FULL:
2091 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
 /* Short-reach fiber */
2094 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2095 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2096 switch (adapter->link_speed) {
2097 case IXGBE_LINK_SPEED_10GB_FULL:
2098 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2100 case IXGBE_LINK_SPEED_1GB_FULL:
2101 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2104 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2105 switch (adapter->link_speed) {
2106 case IXGBE_LINK_SPEED_10GB_FULL:
2107 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2111 * XXX: These need to use the proper media types once
 /* Backplane (KR/KX4/KX): on old ifmedia headers without
  * IFM_ETH_XTYPE, fall back to the closest fiber subtypes. */
2114 #ifndef IFM_ETH_XTYPE
2115 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2116 switch (adapter->link_speed) {
2117 case IXGBE_LINK_SPEED_10GB_FULL:
2118 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2120 case IXGBE_LINK_SPEED_2_5GB_FULL:
2121 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2123 case IXGBE_LINK_SPEED_1GB_FULL:
2124 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2127 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2128 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2129 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2130 switch (adapter->link_speed) {
2131 case IXGBE_LINK_SPEED_10GB_FULL:
2132 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2134 case IXGBE_LINK_SPEED_2_5GB_FULL:
2135 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2137 case IXGBE_LINK_SPEED_1GB_FULL:
2138 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
 /* Newer ifmedia headers: real KR/KX4/KX subtypes exist. */
2142 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2143 switch (adapter->link_speed) {
2144 case IXGBE_LINK_SPEED_10GB_FULL:
2145 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2147 case IXGBE_LINK_SPEED_2_5GB_FULL:
2148 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2150 case IXGBE_LINK_SPEED_1GB_FULL:
2151 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2154 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2155 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2156 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2157 switch (adapter->link_speed) {
2158 case IXGBE_LINK_SPEED_10GB_FULL:
2159 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2161 case IXGBE_LINK_SPEED_2_5GB_FULL:
2162 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2164 case IXGBE_LINK_SPEED_1GB_FULL:
2165 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2170 /* If nothing is recognized... */
2171 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2172 ifmr->ifm_active |= IFM_UNKNOWN;
2174 #if __FreeBSD_version >= 900025
2175 /* Display current flow control setting used on link */
2176 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2177 hw->fc.current_mode == ixgbe_fc_full)
2178 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2179 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2180 hw->fc.current_mode == ixgbe_fc_full)
2181 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2184 IXGBE_CORE_UNLOCK(adapter);
2187 } /* ixgbe_media_status */
2189 /************************************************************************
2190 * ixgbe_media_change - Media Ioctl callback
2192 * Called when the user changes speed/duplex using
2193 * media/mediopt option with ifconfig.
 *
 * Translates the requested ifmedia subtype into an ixgbe_link_speed
 * mask, asks the MAC to set up the link, and records the advertised
 * speeds in adapter->advertise.
2194 ************************************************************************/
2196 ixgbe_media_change(struct ifnet *ifp)
2198 struct adapter *adapter = ifp->if_softc;
2199 struct ifmedia *ifm = &adapter->media;
2200 struct ixgbe_hw *hw = &adapter->hw;
2201 ixgbe_link_speed speed = 0;
2203 INIT_DEBUGOUT("ixgbe_media_change: begin");
2205 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 /* Backplane media cannot be changed from software; presumably the
  * elided line here bails out early — TODO confirm. */
2208 if (hw->phy.media_type == ixgbe_media_type_backplane)
2212 * We don't actually need to check against the supported
2213 * media types of the adapter; ifmedia will take care of
2216 switch (IFM_SUBTYPE(ifm->ifm_media)) {
 /* Auto: advertise every speed the driver knows about. */
2219 speed |= IXGBE_LINK_SPEED_100_FULL;
2220 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2221 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2225 #ifndef IFM_ETH_XTYPE
2226 case IFM_10G_SR: /* KR, too */
2227 case IFM_10G_CX4: /* KX4 */
2232 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2233 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2235 #ifndef IFM_ETH_XTYPE
2236 case IFM_1000_CX: /* KX */
2242 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2245 speed |= IXGBE_LINK_SPEED_100_FULL;
2246 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2248 case IFM_10G_TWINAX:
2249 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2252 speed |= IXGBE_LINK_SPEED_100_FULL;
2255 speed |= IXGBE_LINK_SPEED_10_FULL;
2261 hw->mac.autotry_restart = TRUE;
2262 hw->mac.ops.setup_link(hw, speed, TRUE);
 /* Encode the advertised set as the sysctl bitmask:
  * 1 = 100M, 2 = 1G, 4 = 10G, 8 = 10M. */
2263 adapter->advertise =
2264 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2265 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
2266 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
2267 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
2272 device_printf(adapter->dev, "Invalid media type!\n");
2275 } /* ixgbe_media_change */
2277 /************************************************************************
 * ixgbe_set_promisc
 *
 * Program FCTRL's unicast/multicast promiscuous bits (UPE/MPE) from the
 * interface flags IFF_PROMISC and IFF_ALLMULTI, counting the multicast
 * list to decide whether MPE can be cleared.
2279 ************************************************************************/
2281 ixgbe_set_promisc(struct adapter *adapter)
2283 struct ifnet *ifp = adapter->ifp;
2287 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2288 rctl &= (~IXGBE_FCTRL_UPE);
2289 if (ifp->if_flags & IFF_ALLMULTI)
 /* ALLMULTI: pretend the list is full so MPE stays set below. */
2290 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2292 struct ifmultiaddr *ifma;
2293 #if __FreeBSD_version < 800000
2296 if_maddr_rlock(ifp);
 /* Count AF_LINK multicast entries, capped at the filter size. */
2298 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2299 if (ifma->ifma_addr->sa_family != AF_LINK)
2301 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2305 #if __FreeBSD_version < 800000
2306 IF_ADDR_UNLOCK(ifp);
2308 if_maddr_runlock(ifp);
 /* List fits in the hardware filter: multicast promiscuous not
  * needed. */
2311 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2312 rctl &= (~IXGBE_FCTRL_MPE);
2313 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2315 if (ifp->if_flags & IFF_PROMISC) {
2316 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2317 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2318 } else if (ifp->if_flags & IFF_ALLMULTI) {
2319 rctl |= IXGBE_FCTRL_MPE;
2320 rctl &= ~IXGBE_FCTRL_UPE;
2321 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2323 } /* ixgbe_set_promisc */
2325 /************************************************************************
2326 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
 *
 * "Other cause" interrupt handler: decodes EICR and dispatches link
 * changes, flow-director reinit, ECC errors, over-temperature events,
 * VF mailbox messages, SFP module/MSF events, fan failure and external
 * PHY interrupts to their respective taskqueue tasks.
2327 ************************************************************************/
2329 ixgbe_msix_link(void *arg)
2331 struct adapter *adapter = arg;
2332 struct ixgbe_hw *hw = &adapter->hw;
2333 u32 eicr, eicr_mask;
2336 ++adapter->link_irq;
2338 /* Pause other interrupts */
2339 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2341 /* First get the cause */
 /* NOTE(review): cause is read from EICS (interrupt cause SET), not
  * EICR — looks intentional here but verify against the datasheet. */
2342 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2343 /* Be sure the queue bits are not cleared */
2344 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2345 /* Clear interrupt with write */
2346 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2348 /* Link status change */
2349 if (eicr & IXGBE_EICR_LSC) {
2350 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2351 taskqueue_enqueue(adapter->tq, &adapter->link_task);
2354 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2355 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2356 (eicr & IXGBE_EICR_FLOW_DIR)) {
2357 /* This is probably overkill :) */
 /* Only one flow-director reinit in flight at a time. */
2358 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2360 /* Disable the interrupt */
2361 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2362 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
2365 if (eicr & IXGBE_EICR_ECC) {
2366 device_printf(adapter->dev,
2367 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
2368 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2371 /* Check for over temp condition */
2372 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2373 switch (adapter->hw.mac.type) {
2374 case ixgbe_mac_X550EM_a:
 /* X550EM_a signals over-temp via GPI SDP0 instead of the
  * thermal-sensor (TS) bit used by the default path below. */
2375 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2377 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2378 IXGBE_EICR_GPI_SDP0_X550EM_a);
2379 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2380 IXGBE_EICR_GPI_SDP0_X550EM_a);
2381 retval = hw->phy.ops.check_overtemp(hw);
2382 if (retval != IXGBE_ERR_OVERTEMP)
2384 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2385 device_printf(adapter->dev, "System shutdown required!\n");
2388 if (!(eicr & IXGBE_EICR_TS))
2390 retval = hw->phy.ops.check_overtemp(hw);
2391 if (retval != IXGBE_ERR_OVERTEMP)
2393 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2394 device_printf(adapter->dev, "System shutdown required!\n");
2395 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2400 /* Check for VF message */
2401 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2402 (eicr & IXGBE_EICR_MAILBOX))
2403 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
2406 if (ixgbe_is_sfp(hw)) {
2407 /* Pluggable optics-related interrupt */
 /* Module-present pin differs by MAC generation. */
2408 if (hw->mac.type >= ixgbe_mac_X540)
2409 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2411 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2413 if (eicr & eicr_mask) {
2414 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2415 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2418 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2419 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2420 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2421 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2422 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2426 /* Check for fan failure */
2427 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2428 ixgbe_check_fan_failure(adapter, eicr, TRUE);
2429 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2432 /* External PHY interrupt */
2433 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2434 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2435 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2436 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
2439 /* Re-enable other interrupts */
2440 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
2441 } /* ixgbe_msix_link */
2443 /************************************************************************
2444 * ixgbe_sysctl_interrupt_rate_handler
 *
 * Sysctl handler exposing a queue's interrupt rate: reads the EITR
 * interval for the queue's vector, converts it to interrupts/second
 * for display, and on write converts the requested rate back into an
 * EITR interval (clamped to the valid field).
2445 ************************************************************************/
2447 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2449 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
2451 unsigned int reg, usec, rate;
2453 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
 /* EITR interval lives in bits 3..11, in ~2us units -> usec. */
2454 usec = ((reg & 0x0FF8) >> 3);
 /* NOTE(review): division assumes usec != 0; presumably an elided
  * line guards this — confirm against the full source. */
2456 rate = 500000 / usec;
2459 error = sysctl_handle_int(oidp, &rate, 0, req);
2460 if (error || !req->newptr)
2462 reg &= ~0xfff; /* default, no limitation */
2463 ixgbe_max_interrupt_rate = 0;
2464 if (rate > 0 && rate < 500000) {
2467 ixgbe_max_interrupt_rate = rate;
2468 reg |= ((4000000/rate) & 0xff8);
2470 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2473 } /* ixgbe_sysctl_interrupt_rate_handler */
2475 /************************************************************************
2476 * ixgbe_add_device_sysctls
 *
 * Register the per-device sysctl tree: common knobs for all adapters,
 * plus conditional nodes for X550 DMA coalescing, Wake-on-LAN,
 * X552/X557-AT external PHY temperature, and EEE.
2477 ************************************************************************/
2479 ixgbe_add_device_sysctls(struct adapter *adapter)
2481 device_t dev = adapter->dev;
2482 struct ixgbe_hw *hw = &adapter->hw;
2483 struct sysctl_oid_list *child;
2484 struct sysctl_ctx_list *ctx;
2486 ctx = device_get_sysctl_ctx(dev);
2487 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2489 /* Sysctls for all devices */
2490 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
2491 adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
 /* Seed the AIM knob from the driver-wide tunable. */
2493 adapter->enable_aim = ixgbe_enable_aim;
2494 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2495 &adapter->enable_aim, 1, "Interrupt Moderation");
2497 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
2498 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2499 IXGBE_SYSCTL_DESC_ADV_SPEED);
2502 /* testing sysctls (for all devices) */
2503 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
2504 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2505 "I", "PCI Power State");
2507 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
2508 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2509 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2511 /* for X550 series devices */
2512 if (hw->mac.type >= ixgbe_mac_X550)
2513 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
2514 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2515 "I", "DMA Coalesce");
2517 /* for WoL-capable devices */
2518 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2519 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
2520 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2521 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2523 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
2524 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2525 "I", "Enable/Disable Wake Up Filters");
2528 /* for X552/X557-AT devices */
2529 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2530 struct sysctl_oid *phy_node;
2531 struct sysctl_oid_list *phy_list;
 /* Group the external-PHY sensors under a "phy" sub-node. */
2533 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
2534 CTLFLAG_RD, NULL, "External PHY sysctls");
2535 phy_list = SYSCTL_CHILDREN(phy_node);
2537 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
2538 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2539 "I", "Current External PHY Temperature (Celsius)");
2541 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
2542 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
2543 ixgbe_sysctl_phy_overtemp_occurred, "I",
2544 "External PHY High Temperature Event Occurred");
2547 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2548 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
2549 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2550 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2552 } /* ixgbe_add_device_sysctls */
2554 /************************************************************************
2555 * ixgbe_allocate_pci_resources
 *
 * Map the device's memory BAR and record the bus_space tag/handle that
 * the register READ/WRITE macros and the shared code use.
2556 ************************************************************************/
2558 ixgbe_allocate_pci_resources(struct adapter *adapter)
2560 device_t dev = adapter->dev;
2564 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2567 if (!(adapter->pci_mem)) {
2568 device_printf(dev, "Unable to allocate bus resource: memory\n");
2572 /* Save bus_space values for READ/WRITE_REG macros */
2573 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2574 adapter->osdep.mem_bus_space_handle =
2575 rman_get_bushandle(adapter->pci_mem);
2576 /* Set hw values for shared code */
 /* Shared code dereferences hw_addr; point it at the saved handle. */
2577 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2580 } /* ixgbe_allocate_pci_resources */
2582 /************************************************************************
2583 * ixgbe_detach - Device removal routine
2585 * Called when the driver is being removed.
2586 * Stops the adapter and deallocates all the resources
2587 * that were allocated for driver operation.
2589 * return 0 on success, positive on failure
2590 ************************************************************************/
2592 ixgbe_detach(device_t dev)
2594 struct adapter *adapter = device_get_softc(dev);
2595 struct ix_queue *que = adapter->queues;
2596 struct tx_ring *txr = adapter->tx_rings;
2599 INIT_DEBUGOUT("ixgbe_detach: begin");
2601 /* Make sure VLANS are not using driver */
2602 if (adapter->ifp->if_vlantrunk != NULL) {
2603 device_printf(dev, "Vlan in use, detach first\n");
2607 if (ixgbe_pci_iov_detach(dev) != 0) {
2608 device_printf(dev, "SR-IOV in use; detach first.\n");
 /* Detach from the network stack before tearing anything down. */
2612 ether_ifdetach(adapter->ifp);
2613 /* Stop the adapter */
2614 IXGBE_CORE_LOCK(adapter);
2615 ixgbe_setup_low_power_mode(adapter);
2616 IXGBE_CORE_UNLOCK(adapter);
 /* Drain and free the per-queue taskqueues. */
2618 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
2620 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2621 taskqueue_drain(que->tq, &txr->txq_task)
2622 taskqueue_drain(que->tq, &que->que_task);
2623 taskqueue_free(que->tq);
2627 /* Drain the Link queue */
2629 taskqueue_drain(adapter->tq, &adapter->link_task);
2630 taskqueue_drain(adapter->tq, &adapter->mod_task);
2631 taskqueue_drain(adapter->tq, &adapter->msf_task);
2632 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2633 taskqueue_drain(adapter->tq, &adapter->mbx_task);
2634 taskqueue_drain(adapter->tq, &adapter->phy_task);
2635 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
2636 taskqueue_drain(adapter->tq, &adapter->fdir_task);
2637 taskqueue_free(adapter->tq);
2640 /* let hardware know driver is unloading */
2641 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2642 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2643 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2645 /* Unregister VLAN events */
2646 if (adapter->vlan_attach != NULL)
2647 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
2648 if (adapter->vlan_detach != NULL)
2649 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
2651 callout_drain(&adapter->timer);
2653 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
2654 netmap_detach(adapter->ifp);
2656 ixgbe_free_pci_resources(adapter);
2657 bus_generic_detach(dev);
2658 if_free(adapter->ifp);
2660 ixgbe_free_transmit_structures(adapter);
2661 ixgbe_free_receive_structures(adapter);
2662 free(adapter->queues, M_DEVBUF);
2663 free(adapter->mta, M_IXGBE);
2665 IXGBE_CORE_LOCK_DESTROY(adapter);
2668 } /* ixgbe_detach */
2670 /************************************************************************
2671 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2673 * Prepare the adapter/port for LPLU and/or WoL
 *
 * Caller must hold the core lock. X550EM baseT parts get the full
 * wakeup-register + LPLU flow; everything else is simply stopped.
2674 ************************************************************************/
2676 ixgbe_setup_low_power_mode(struct adapter *adapter)
2678 struct ixgbe_hw *hw = &adapter->hw;
2679 device_t dev = adapter->dev;
2682 mtx_assert(&adapter->core_mtx, MA_OWNED);
2684 /* Limit power management flow to X550EM baseT */
2685 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2686 hw->phy.ops.enter_lplu) {
2687 /* Turn off support for APM wakeup. (Using ACPI instead) */
2688 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2689 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2692 * Clear Wake Up Status register to prevent any previous wakeup
2693 * events from waking us up immediately after we suspend.
2695 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2698 * Program the Wakeup Filter Control register with user filter
2701 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2703 /* Enable wakeups and power management in Wakeup Control */
2704 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2705 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2707 /* X550EM baseT adapters need a special LPLU flow */
 /* Keep the PHY up through ixgbe_stop() so enter_lplu can talk
  * to it; restore reset_disable afterwards. */
2708 hw->phy.reset_disable = true;
2709 ixgbe_stop(adapter);
2710 error = hw->phy.ops.enter_lplu(hw);
2712 device_printf(dev, "Error entering LPLU: %d\n", error);
2713 hw->phy.reset_disable = false;
2715 /* Just stop for other adapters */
2716 ixgbe_stop(adapter);
2720 } /* ixgbe_setup_low_power_mode */
2722 /************************************************************************
2723 * ixgbe_shutdown - Shutdown entry point
 *
 * Puts the adapter into its low-power/WoL state under the core lock;
 * returns the result of ixgbe_setup_low_power_mode().
2724 ************************************************************************/
2726 ixgbe_shutdown(device_t dev)
2728 struct adapter *adapter = device_get_softc(dev);
2731 INIT_DEBUGOUT("ixgbe_shutdown: begin");
2733 IXGBE_CORE_LOCK(adapter);
2734 error = ixgbe_setup_low_power_mode(adapter);
2735 IXGBE_CORE_UNLOCK(adapter);
2738 } /* ixgbe_shutdown */
2740 /************************************************************************
 * ixgbe_suspend - Suspend entry point
 *
 * Same flow as shutdown: enter low-power/WoL mode under the core lock.
2744 ************************************************************************/
2746 ixgbe_suspend(device_t dev)
2748 struct adapter *adapter = device_get_softc(dev);
2751 INIT_DEBUGOUT("ixgbe_suspend: begin");
2753 IXGBE_CORE_LOCK(adapter);
2755 error = ixgbe_setup_low_power_mode(adapter);
2757 IXGBE_CORE_UNLOCK(adapter);
2760 } /* ixgbe_suspend */
2762 /************************************************************************
 * ixgbe_resume - Resume entry point
 *
 * Reports and clears the wakeup cause (WUS), disarms the wakeup
 * filters (WUFC), and re-initializes the interface if it was up.
2766 ************************************************************************/
2768 ixgbe_resume(device_t dev)
2770 struct adapter *adapter = device_get_softc(dev);
2771 struct ifnet *ifp = adapter->ifp;
2772 struct ixgbe_hw *hw = &adapter->hw;
2775 INIT_DEBUGOUT("ixgbe_resume: begin");
2777 IXGBE_CORE_LOCK(adapter);
2779 /* Read & clear WUS register */
2780 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2782 device_printf(dev, "Woken up by (WUS): %#010x\n",
2783 IXGBE_READ_REG(hw, IXGBE_WUS));
 /* WUS bits are write-1-to-clear. */
2784 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2785 /* And clear WUFC until next low-power transition */
2786 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2789 * Required after D3->D0 transition;
2790 * will re-advertise all previous advertised speeds
2792 if (ifp->if_flags & IFF_UP)
2793 ixgbe_init_locked(adapter);
2795 IXGBE_CORE_UNLOCK(adapter);
2798 } /* ixgbe_resume */
2800 /************************************************************************
2801 * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
2803 * Takes the ifnet's if_capenable flags (e.g. set by the user using
2804 * ifconfig) and indicates to the OS via the ifnet's if_hwassist
2805 * field what mbuf offload flags the driver will understand.
2806 ************************************************************************/
2808 ixgbe_set_if_hwassist(struct adapter *adapter)
2810 struct ifnet *ifp = adapter->ifp;
 /* Rebuild if_hwassist from scratch each time. */
2812 ifp->if_hwassist = 0;
2813 #if __FreeBSD_version >= 1000000
 /* Newer stacks: per-protocol CSUM_IP*/CSUM_IP6* flag names. */
2814 if (ifp->if_capenable & IFCAP_TSO4)
2815 ifp->if_hwassist |= CSUM_IP_TSO;
2816 if (ifp->if_capenable & IFCAP_TSO6)
2817 ifp->if_hwassist |= CSUM_IP6_TSO;
2818 if (ifp->if_capenable & IFCAP_TXCSUM) {
2819 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
 /* 82598 lacks SCTP checksum offload. */
2820 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2821 ifp->if_hwassist |= CSUM_IP_SCTP;
2823 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
2824 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
2825 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2826 ifp->if_hwassist |= CSUM_IP6_SCTP;
 /* Older stacks: legacy combined CSUM_* flag names. */
2829 if (ifp->if_capenable & IFCAP_TSO)
2830 ifp->if_hwassist |= CSUM_TSO;
2831 if (ifp->if_capenable & IFCAP_TXCSUM) {
2832 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
2833 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2834 ifp->if_hwassist |= CSUM_SCTP;
2837 } /* ixgbe_set_if_hwassist */
2839 /************************************************************************
2840 * ixgbe_init_locked - Init entry point
2842 * Used in two ways: It is used by the stack as an init
2843 * entry point in network interface structure. It is also
2844 * used by the driver as a hw/sw initialization routine to
2845 * get to a consistent state.
2847 * return 0 on success, positive on failure
2848 ************************************************************************/
2850 ixgbe_init_locked(struct adapter *adapter)
2852 struct ifnet *ifp = adapter->ifp;
2853 device_t dev = adapter->dev;
2854 struct ixgbe_hw *hw = &adapter->hw;
2855 struct tx_ring *txr;
2856 struct rx_ring *rxr;
2862 mtx_assert(&adapter->core_mtx, MA_OWNED);
2863 INIT_DEBUGOUT("ixgbe_init_locked: begin");
2865 hw->adapter_stopped = FALSE;
2866 ixgbe_stop_adapter(hw);
2867 callout_stop(&adapter->timer);
2869 /* Queue indices may change with IOV mode */
2870 ixgbe_align_all_queue_indices(adapter);
2872 /* reprogram the RAR[0] in case user changed it. */
2873 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2875 /* Get the latest mac address, User can use a LAA */
2876 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2877 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2878 hw->addr_ctrl.rar_used_count = 1;
2880 /* Set hardware offload abilities from ifnet flags */
2881 ixgbe_set_if_hwassist(adapter);
2883 /* Prepare transmit descriptors and buffers */
2884 if (ixgbe_setup_transmit_structures(adapter)) {
2885 device_printf(dev, "Could not setup transmit structures\n");
2886 ixgbe_stop(adapter);
2891 ixgbe_initialize_iov(adapter);
2892 ixgbe_initialize_transmit_units(adapter);
2894 /* Setup Multicast table */
2895 ixgbe_set_multi(adapter);
2897 /* Determine the correct mbuf pool, based on frame size */
2898 if (adapter->max_frame_size <= MCLBYTES)
2899 adapter->rx_mbuf_sz = MCLBYTES;
2901 adapter->rx_mbuf_sz = MJUMPAGESIZE;
2903 /* Prepare receive descriptors and buffers */
2904 if (ixgbe_setup_receive_structures(adapter)) {
2905 device_printf(dev, "Could not setup receive structures\n");
2906 ixgbe_stop(adapter);
2910 /* Configure RX settings */
2911 ixgbe_initialize_receive_units(adapter);
2913 /* Enable SDP & MSI-X interrupts based on adapter */
2914 ixgbe_config_gpie(adapter);
2917 if (ifp->if_mtu > ETHERMTU) {
2918 /* aka IXGBE_MAXFRS on 82599 and newer */
2919 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2920 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2921 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2922 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2925 /* Now enable all the queues */
2926 for (int i = 0; i < adapter->num_queues; i++) {
2927 txr = &adapter->tx_rings[i];
2928 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2929 txdctl |= IXGBE_TXDCTL_ENABLE;
2930 /* Set WTHRESH to 8, burst writeback */
2931 txdctl |= (8 << 16);
2933 * When the internal queue falls below PTHRESH (32),
2934 * start prefetching as long as there are at least
2935 * HTHRESH (1) buffers ready. The values are taken
2936 * from the Intel linux driver 3.8.21.
2937 * Prefetching enables tx line rate even with 1 queue.
2939 txdctl |= (32 << 0) | (1 << 8);
2940 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2943 for (int i = 0, j = 0; i < adapter->num_queues; i++) {
2944 rxr = &adapter->rx_rings[i];
2945 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2946 if (hw->mac.type == ixgbe_mac_82598EB) {
2952 rxdctl &= ~0x3FFFFF;
2955 rxdctl |= IXGBE_RXDCTL_ENABLE;
2956 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2957 for (; j < 10; j++) {
2958 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2959 IXGBE_RXDCTL_ENABLE)
2967 * In netmap mode, we must preserve the buffers made
2968 * available to userspace before the if_init()
2969 * (this is true by default on the TX side, because
2970 * init makes all buffers available to userspace).
2972 * netmap_reset() and the device specific routines
2973 * (e.g. ixgbe_setup_receive_rings()) map these
2974 * buffers at the end of the NIC ring, so here we
2975 * must set the RDT (tail) register to make sure
2976 * they are not overwritten.
2978 * In this driver the NIC ring starts at RDH = 0,
2979 * RDT points to the last slot available for reception (?),
2980 * so RDT = num_rx_desc - 1 means the whole ring is available.
2983 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
2984 (ifp->if_capenable & IFCAP_NETMAP)) {
2985 struct netmap_adapter *na = NA(adapter->ifp);
2986 struct netmap_kring *kring = &na->rx_rings[i];
2987 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2989 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
2991 #endif /* DEV_NETMAP */
2992 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
2993 adapter->num_rx_desc - 1);
2996 /* Enable Receive engine */
2997 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2998 if (hw->mac.type == ixgbe_mac_82598EB)
2999 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3000 rxctrl |= IXGBE_RXCTRL_RXEN;
3001 ixgbe_enable_rx_dma(hw, rxctrl);
3003 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3005 /* Set up MSI-X routing */
3006 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3007 ixgbe_configure_ivars(adapter);
3008 /* Set up auto-mask */
3009 if (hw->mac.type == ixgbe_mac_82598EB)
3010 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3012 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3013 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3015 } else { /* Simple settings for Legacy/MSI */
3016 ixgbe_set_ivar(adapter, 0, 0, 0);
3017 ixgbe_set_ivar(adapter, 0, 0, 1);
3018 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3021 ixgbe_init_fdir(adapter);
3024 * Check on any SFP devices that
3025 * need to be kick-started
3027 if (hw->phy.type == ixgbe_phy_none) {
3028 err = hw->phy.ops.identify(hw);
3029 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3031 "Unsupported SFP+ module type was detected.\n");
3036 /* Set moderation on the Link interrupt */
3037 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3039 /* Config/Enable Link */
3040 ixgbe_config_link(adapter);
3042 /* Hardware Packet Buffer & Flow Control setup */
3043 ixgbe_config_delay_values(adapter);
3045 /* Initialize the FC settings */
3048 /* Set up VLAN support and filter */
3049 ixgbe_setup_vlan_hw_support(adapter);
3051 /* Setup DMA Coalescing */
3052 ixgbe_config_dmac(adapter);
3054 /* And now turn on interrupts */
3055 ixgbe_enable_intr(adapter);
3057 /* Enable the use of the MBX by the VF's */
3058 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3059 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3060 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3061 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3064 /* Now inform the stack we're ready */
3065 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3068 } /* ixgbe_init_locked */
3070 /************************************************************************
 * ixgbe_init
3072 ************************************************************************/
/*
 * Serializing wrapper: takes the core mutex and runs the real
 * (locked) initialization routine, ixgbe_init_locked().
 * NOTE(review): the return-type line, opening brace, and closing brace
 * were lost in extraction (embedded numbering gaps 3073/3075/3081).
 */
3074 ixgbe_init(void *arg)
3076 struct adapter *adapter = arg;
3078 IXGBE_CORE_LOCK(adapter);
3079 ixgbe_init_locked(adapter);
3080 IXGBE_CORE_UNLOCK(adapter);
3085 /************************************************************************
3088 * Setup the correct IVAR register for a particular MSI-X interrupt
3089 * (yes this is all very magic and confusing :)
3090 * - entry is the register array entry
3091 * - vector is the MSI-X vector for this queue
3092 * - type is RX/TX/MISC
3093 ************************************************************************/
/*
 * Program one Interrupt Vector Allocation Register (IVAR) field so that
 * cause 'entry' of kind 'type' (0 = RX, 1 = TX, -1 = MISC/other) is routed
 * to MSI-X vector 'vector'.  Register layout differs by MAC generation.
 * NOTE(review): several lines are missing from this view (embedded
 * numbering gaps, e.g. 3105 before the MISC-index assignment and the
 * break statements at 3114/3133+); confirm against the full file.
 */
3095 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3097 struct ixgbe_hw *hw = &adapter->hw;
 /* Hardware requires the "allocation valid" bit in every IVAR field. */
3100 vector |= IXGBE_IVAR_ALLOC_VAL;
3102 switch (hw->mac.type) {
3104 case ixgbe_mac_82598EB:
 /* MISC causes use the dedicated "other causes" index; a guard line
  * (presumably `if (type == -1)`) appears to be missing here. */
3106 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3108 entry += (type * 64);
 /* Four 8-bit fields per 32-bit IVAR register: pick register and byte. */
3109 index = (entry >> 2) & 0x1F;
3110 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3111 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3112 ivar |= (vector << (8 * (entry & 0x3)));
3113 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3116 case ixgbe_mac_82599EB:
3117 case ixgbe_mac_X540:
3118 case ixgbe_mac_X550:
3119 case ixgbe_mac_X550EM_x:
3120 case ixgbe_mac_X550EM_a:
3121 if (type == -1) { /* MISC IVAR */
 /* IVAR_MISC holds two 8-bit fields; entry bit 0 selects byte 0 or 1. */
3122 index = (entry & 1) * 8;
3123 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3124 ivar &= ~(0xFF << index);
3125 ivar |= (vector << index);
3126 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3127 } else { /* RX/TX IVARS */
 /* Two queues per IVAR register (entry >> 1); RX and TX fields interleave. */
3128 index = (16 * (entry & 1)) + (8 * type);
3129 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3130 ivar &= ~(0xFF << index);
3131 ivar |= (vector << index);
3132 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3138 } /* ixgbe_set_ivar */
3140 /************************************************************************
3141 * ixgbe_configure_ivars
3142 ************************************************************************/
/*
 * Route every RX and TX queue to its MSI-X vector via ixgbe_set_ivar(),
 * seed each vector's EITR moderation register, and finally bind the link
 * (misc) interrupt to adapter->vector.
 * NOTE(review): the declaration of `newitr` and part of the comment about
 * DMA coalescing are missing from this view (numbering gaps 3147-3159).
 */
3144 ixgbe_configure_ivars(struct adapter *adapter)
3146 struct ix_queue *que = adapter->queues;
 /* Convert the tunable max interrupt rate into EITR units (4us granularity,
  * low 3 bits masked off). */
3149 if (ixgbe_max_interrupt_rate > 0)
3150 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3153 * Disable DMA coalescing if interrupt moderation is
3160 for (int i = 0; i < adapter->num_queues; i++, que++) {
3161 struct rx_ring *rxr = &adapter->rx_rings[i];
3162 struct tx_ring *txr = &adapter->tx_rings[i];
3163 /* First the RX queue entry */
3164 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3165 /* ... and the TX */
3166 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3167 /* Set an Initial EITR value */
3168 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3171 /* For the Link interrupt */
3172 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3173 } /* ixgbe_configure_ivars */
3175 /************************************************************************
 * ixgbe_config_gpie
3177 ************************************************************************/
/*
 * Build and write the General Purpose Interrupt Enable (GPIE) register:
 * enhanced MSI-X mode when MSI-X is in use, plus SDP pin interrupts for
 * fan failure, thermal sensor, and link/module detection per MAC type.
 * NOTE(review): extra OR-ed GPIE flags (line 3189), the closing of the
 * MSI-X if-block, and switch break/default lines are missing from this
 * view (numbering gaps); confirm against the full file.
 */
3179 ixgbe_config_gpie(struct adapter *adapter)
3181 struct ixgbe_hw *hw = &adapter->hw;
 /* Read-modify-write: preserve bits we do not manage here. */
3184 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3186 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3187 /* Enable Enhanced MSI-X mode */
3188 gpie |= IXGBE_GPIE_MSIX_MODE
3190 | IXGBE_GPIE_PBA_SUPPORT
3194 /* Fan Failure Interrupt */
3195 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3196 gpie |= IXGBE_SDP1_GPIEN;
3198 /* Thermal Sensor Interrupt */
3199 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3200 gpie |= IXGBE_SDP0_GPIEN_X540;
3202 /* Link detection */
3203 switch (hw->mac.type) {
3204 case ixgbe_mac_82599EB:
3205 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3207 case ixgbe_mac_X550EM_x:
3208 case ixgbe_mac_X550EM_a:
3209 gpie |= IXGBE_SDP0_GPIEN_X540;
3215 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3218 } /* ixgbe_config_gpie */
3220 /************************************************************************
3221 * ixgbe_config_delay_values
3223 * Requires adapter->max_frame_size to be set.
3224 ************************************************************************/
/*
 * Compute flow-control high/low watermarks from the max frame size and
 * the RX packet buffer size, then set pause time and XON behavior.
 * X540-class MACs use the _X540 delay-value macros; older MACs use the
 * generic ones.  Requires adapter->max_frame_size to already be set
 * (see banner above).
 */
3226 ixgbe_config_delay_values(struct adapter *adapter)
3228 struct ixgbe_hw *hw = &adapter->hw;
3229 u32 rxpb, frame, size, tmp;
3231 frame = adapter->max_frame_size;
3233 /* Calculate High Water */
3234 switch (hw->mac.type) {
3235 case ixgbe_mac_X540:
3236 case ixgbe_mac_X550:
3237 case ixgbe_mac_X550EM_x:
3238 case ixgbe_mac_X550EM_a:
3239 tmp = IXGBE_DV_X540(frame, frame);
 /* default case (numbering gap suggests break/default lines lost here) */
3242 tmp = IXGBE_DV(frame, frame);
 /* Convert delay value (bit times) to KB, subtract from RX buffer size
  * (RXPBSIZE is in bytes; >> 10 converts to KB). */
3245 size = IXGBE_BT2KB(tmp);
3246 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3247 hw->fc.high_water[0] = rxpb - size;
3249 /* Now calculate Low Water */
3250 switch (hw->mac.type) {
3251 case ixgbe_mac_X540:
3252 case ixgbe_mac_X550:
3253 case ixgbe_mac_X550EM_x:
3254 case ixgbe_mac_X550EM_a:
3255 tmp = IXGBE_LOW_DV_X540(frame);
3258 tmp = IXGBE_LOW_DV(frame);
3261 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3263 hw->fc.pause_time = IXGBE_FC_PAUSE;
3264 hw->fc.send_xon = TRUE;
3265 } /* ixgbe_config_delay_values */
3267 /************************************************************************
3268 * ixgbe_set_multi - Multicast Update
3270 * Called whenever multicast address list is updated.
3271 ************************************************************************/
/*
 * Rebuild the hardware multicast filter from the interface's multicast
 * address list, and set promiscuous/allmulti policy in FCTRL based on
 * interface flags and whether the MC list overflowed the hardware table.
 * NOTE(review): declarations of mta/update_ptr/mcnt/fctrl and the mcnt++
 * line are missing from this view (numbering gaps 3278-3301).
 */
3273 ixgbe_set_multi(struct adapter *adapter)
3275 struct ifmultiaddr *ifma;
3276 struct ixgbe_mc_addr *mta;
3277 struct ifnet *ifp = adapter->ifp;
3282 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
 /* Clear the whole staging table before repopulating it. */
3285 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3287 #if __FreeBSD_version < 800000
 /* Older FreeBSD used IF_ADDR_LOCK; newer uses if_maddr_rlock. */
3290 if_maddr_rlock(ifp);
3292 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3293 if (ifma->ifma_addr->sa_family != AF_LINK)
 /* Stop collecting once the hardware table capacity is reached. */
3295 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
3297 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
3298 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3299 mta[mcnt].vmdq = adapter->pool;
3302 #if __FreeBSD_version < 800000
3303 IF_ADDR_UNLOCK(ifp);
3305 if_maddr_runlock(ifp);
3308 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
 /* NOTE(review): this unconditional UPE|MPE set duplicates the IFF_PROMISC
  * branch below and appears to defeat the else-chain's filtering policy —
  * verify against the upstream driver; may be an extraction artifact. */
3309 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3310 if (ifp->if_flags & IFF_PROMISC)
3311 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3312 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3313 ifp->if_flags & IFF_ALLMULTI) {
3314 fctrl |= IXGBE_FCTRL_MPE;
3315 fctrl &= ~IXGBE_FCTRL_UPE;
 /* else branch: normal filtering, clear both promisc bits. */
3317 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3319 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
 /* Only push the MC list when it fit in hardware; otherwise MPE covers it. */
3321 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3322 update_ptr = (u8 *)mta;
3323 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3324 ixgbe_mc_array_itr, TRUE);
3328 } /* ixgbe_set_multi */
3330 /************************************************************************
3331 * ixgbe_mc_array_itr
3333 * An iterator function needed by the multicast shared code.
3334 * It feeds the shared code routine the addresses in the
3335 * array of ixgbe_set_multi() one by one.
3336 ************************************************************************/
/*
 * Iterator callback for ixgbe_update_mc_addr_list(): returns the current
 * multicast entry and advances *update_ptr to the next array element.
 * NOTE(review): the lines assigning *vmdq and returning mta->addr are
 * missing from this view (numbering gaps 3343-3347).
 */
3338 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3340 struct ixgbe_mc_addr *mta;
3342 mta = (struct ixgbe_mc_addr *)*update_ptr;
 /* Advance the cursor one ixgbe_mc_addr element for the next call. */
3345 *update_ptr = (u8*)(mta + 1);
3348 } /* ixgbe_mc_array_itr */
3350 /************************************************************************
3351 * ixgbe_local_timer - Timer routine
3353 * Checks for link status, updates statistics,
3354 * and runs the watchdog check.
3355 ************************************************************************/
/*
 * One-second periodic timer (core lock held): probes pluggable optics,
 * refreshes link state and statistics, scans TX queues for hangs, and
 * either rearms itself or falls through to a full watchdog reset when
 * every queue reports hung.
 * NOTE(review): declarations of hung/queues, the `continue`, `hung++`,
 * `goto watchdog`, `return`, and the `watchdog:` label lines are missing
 * from this view (numbering gaps); the reset tail at 3419-3422 implies a
 * label between 3415 and 3419.
 */
3357 ixgbe_local_timer(void *arg)
3359 struct adapter *adapter = arg;
3360 device_t dev = adapter->dev;
3361 struct ix_queue *que = adapter->queues;
3365 mtx_assert(&adapter->core_mtx, MA_OWNED);
3367 /* Check for pluggable optics */
3368 if (adapter->sfp_probe)
3369 if (!ixgbe_sfp_probe(adapter))
3370 goto out; /* Nothing to do */
3372 ixgbe_update_link_status(adapter);
3373 ixgbe_update_stats_counters(adapter);
3376 * Check the TX queues status
3377 * - mark hung queues so we don't schedule on them
3378 * - watchdog only if all queues show hung
3380 for (int i = 0; i < adapter->num_queues; i++, que++) {
3381 /* Keep track of queues with work for soft irq */
3383 queues |= ((u64)1 << que->me);
3385 * Each time txeof runs without cleaning, but there
3386 * are uncleaned descriptors it increments busy. If
3387 * we get to the MAX we declare it hung.
3389 if (que->busy == IXGBE_QUEUE_HUNG) {
3391 /* Mark the queue as inactive */
3392 adapter->active_queues &= ~((u64)1 << que->me);
3395 /* Check if we've come back from hung */
3396 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
3397 adapter->active_queues |= ((u64)1 << que->me);
3399 if (que->busy >= IXGBE_MAX_TX_BUSY) {
3401 "Warning queue %d appears to be hung!\n", i);
3402 que->txr->busy = IXGBE_QUEUE_HUNG;
3407 /* Only truly watchdog if all queues show hung */
3408 if (hung == adapter->num_queues)
3410 else if (queues != 0) { /* Force an IRQ on queues with work */
3411 ixgbe_rearm_queues(adapter, queues);
 /* out: rearm the 1 Hz callout and return (label line lost in extraction). */
3415 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
 /* watchdog path: mark interface down and reinitialize the adapter. */
3419 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
3420 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3421 adapter->watchdog_events++;
3422 ixgbe_init_locked(adapter);
3423 } /* ixgbe_local_timer */
3425 /************************************************************************
 * ixgbe_sfp_probe
3428 * Determine if a port had optics inserted.
3429 ************************************************************************/
/*
 * Poll for newly inserted SFP optics on NL-PHY ports.  Returns TRUE only
 * once a supported module is detected; unsupported modules print an error
 * and leave result FALSE.
 * NOTE(review): the `if (ret == IXGBE_SUCCESS)` guard around the reset,
 * the `goto out`, `result = TRUE`, the `out:` label, and the final
 * `return (result)` are missing from this view (numbering gaps).
 */
3431 ixgbe_sfp_probe(struct adapter *adapter)
3433 struct ixgbe_hw *hw = &adapter->hw;
3434 device_t dev = adapter->dev;
3435 bool result = FALSE;
 /* Only NL-PHY ports with no module currently present need probing. */
3437 if ((hw->phy.type == ixgbe_phy_nl) &&
3438 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3439 s32 ret = hw->phy.ops.identify_sfp(hw);
3442 ret = hw->phy.ops.reset(hw);
 /* A module was found (supported or not): stop probing from the timer. */
3443 adapter->sfp_probe = FALSE;
3444 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3445 device_printf(dev, "Unsupported SFP+ module detected!");
3447 "Reload driver with supported module.\n");
3450 device_printf(dev, "SFP+ module detected!\n");
3451 /* We now have supported optics */
3457 } /* ixgbe_sfp_probe */
3459 /************************************************************************
3460 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3461 ************************************************************************/
/*
 * Taskqueue handler for SFP module-insertion interrupts: optionally checks
 * cage presence (crosstalk-fix hardware), identifies the module, runs the
 * MAC's setup_sfp hook, and chains the multispeed-fiber (MSF) task.
 * NOTE(review): the ESDP bit masks (lines 3474/3479), the early-return on
 * empty cage, error `return`s after the printfs, and intermediate braces
 * are missing from this view (numbering gaps).
 */
3463 ixgbe_handle_mod(void *context, int pending)
3465 struct adapter *adapter = context;
3466 struct ixgbe_hw *hw = &adapter->hw;
3467 device_t dev = adapter->dev;
3468 u32 err, cage_full = 0;
 /* Some parts need an explicit cage-presence check before identify. */
3470 if (adapter->hw.need_crosstalk_fix) {
3471 switch (hw->mac.type) {
3472 case ixgbe_mac_82599EB:
3473 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3476 case ixgbe_mac_X550EM_x:
3477 case ixgbe_mac_X550EM_a:
3478 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3489 err = hw->phy.ops.identify_sfp(hw);
3490 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3492 "Unsupported SFP+ module type was detected.\n");
3496 err = hw->mac.ops.setup_sfp(hw);
3497 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3499 "Setup failure - unsupported SFP+ module type.\n");
 /* Hand off to the MSF task to renegotiate link on the new module. */
3502 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3503 } /* ixgbe_handle_mod */
3506 /************************************************************************
3507 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3508 ************************************************************************/
/*
 * Taskqueue handler for multispeed-fiber interrupts: re-identifies the
 * PHY layer, renegotiates/sets up the link, and rebuilds the ifmedia
 * list so ifconfig reflects the new module's media types.
 * NOTE(review): declarations of autoneg/negotiate are missing from this
 * view (numbering gaps 3514-3516).
 */
3510 ixgbe_handle_msf(void *context, int pending)
3512 struct adapter *adapter = context;
3513 struct ixgbe_hw *hw = &adapter->hw;
3517 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3518 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3520 autoneg = hw->phy.autoneg_advertised;
 /* If nothing is advertised, fall back to the MAC's full capability set. */
3521 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3522 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3523 if (hw->mac.ops.setup_link)
3524 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3526 /* Adjust media types shown in ifconfig */
3527 ifmedia_removeall(&adapter->media);
3528 ixgbe_add_media_types(adapter);
3529 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3530 } /* ixgbe_handle_msf */
3532 /************************************************************************
3533 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3534 ************************************************************************/
/*
 * Taskqueue handler for external-PHY (LASI) interrupts: delegates to the
 * PHY's handle_lasi op and reports over-temperature or other errors.
 * NOTE(review): the declaration of `error` and the `else if` joining the
 * two printfs are missing from this view (numbering gaps 3540/3545).
 */
3536 ixgbe_handle_phy(void *context, int pending)
3538 struct adapter *adapter = context;
3539 struct ixgbe_hw *hw = &adapter->hw;
3542 error = hw->phy.ops.handle_lasi(hw);
3543 if (error == IXGBE_ERR_OVERTEMP)
3544 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
 /* Non-overtemp errors fall through to a generic LASI report. */
3546 device_printf(adapter->dev,
3547 "Error handling LASI interrupt: %d\n", error);
3548 } /* ixgbe_handle_phy */
3550 /************************************************************************
3551 * ixgbe_stop - Stop the hardware
3553 * Disables all traffic on the adapter by issuing a
3554 * global reset on the MAC and deallocates TX/RX buffers.
3555 ************************************************************************/
/*
 * Bring the adapter down (core lock held): mask interrupts, stop the
 * timer, clear RUNNING, reset the MAC via the shared code, disable the
 * TX laser, report link-down to the stack, and restore RAR[0].
 * NOTE(review): the `struct ifnet *ifp` declaration and a comment line
 * before the stop sequence are missing from this view (numbering gaps).
 */
3557 ixgbe_stop(void *arg)
3560 struct adapter *adapter = arg;
3561 struct ixgbe_hw *hw = &adapter->hw;
3565 mtx_assert(&adapter->core_mtx, MA_OWNED);
3567 INIT_DEBUGOUT("ixgbe_stop: begin\n");
3568 ixgbe_disable_intr(adapter);
3569 callout_stop(&adapter->timer);
3571 /* Let the stack know...*/
3572 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 /* Clear the stopped flag first so the shared-code stop actually runs. */
3575 hw->adapter_stopped = FALSE;
3576 ixgbe_stop_adapter(hw);
3577 if (hw->mac.type == ixgbe_mac_82599EB)
3578 ixgbe_stop_mac_link_on_d3_82599(hw);
3579 /* Turn off the laser - noop with no optics */
3580 ixgbe_disable_tx_laser(hw);
3582 /* Update the stack */
3583 adapter->link_up = FALSE;
3584 ixgbe_update_link_status(adapter);
3586 /* reprogram the RAR[0] in case user changed it. */
3587 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3592 /************************************************************************
3593 * ixgbe_update_link_status - Update OS on link state
3595 * Note: Only updates the OS on the cached link state.
3596 * The real check of the hardware only happens with
3598 ************************************************************************/
/*
 * Push the cached link state (adapter->link_up) to the OS: on an up
 * transition, enable flow control, reconfigure DMA coalescing, notify
 * the stack, and ping VFs; on a down transition, notify and ping VFs.
 * Only transitions (link_active changing) do any work.
 */
3600 ixgbe_update_link_status(struct adapter *adapter)
3602 struct ifnet *ifp = adapter->ifp;
3603 device_t dev = adapter->dev;
3605 if (adapter->link_up) {
3606 if (adapter->link_active == FALSE) {
 /* link_speed 128 is the hw code for 10G; anything else prints 1G here. */
3608 device_printf(dev, "Link is up %d Gbps %s \n",
3609 ((adapter->link_speed == 128) ? 10 : 1),
3611 adapter->link_active = TRUE;
3612 /* Update any Flow Control changes */
3613 ixgbe_fc_enable(&adapter->hw);
3614 /* Update DMA coalescing config */
3615 ixgbe_config_dmac(adapter);
3616 if_link_state_change(ifp, LINK_STATE_UP);
3617 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3618 ixgbe_ping_all_vfs(adapter);
3620 } else { /* Link down */
3621 if (adapter->link_active == TRUE) {
3623 device_printf(dev, "Link is Down\n");
3624 if_link_state_change(ifp, LINK_STATE_DOWN);
3625 adapter->link_active = FALSE;
3626 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3627 ixgbe_ping_all_vfs(adapter);
3632 } /* ixgbe_update_link_status */
3634 /************************************************************************
3635 * ixgbe_config_dmac - Configure DMA Coalescing
3636 ************************************************************************/
/*
 * Configure DMA Coalescing: supported only on X550 and newer MACs that
 * provide a dmac_config op.  Reprograms the hardware only when the
 * watchdog timer or link speed actually changed (XOR tests).
 */
3638 ixgbe_config_dmac(struct adapter *adapter)
3640 struct ixgbe_hw *hw = &adapter->hw;
3641 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
 /* Bail on pre-X550 MACs or when the shared code lacks the op. */
3643 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
 /* Only touch hardware when watchdog timer or link speed changed. */
3646 if (dcfg->watchdog_timer ^ adapter->dmac ||
3647 dcfg->link_speed ^ adapter->link_speed) {
3648 dcfg->watchdog_timer = adapter->dmac;
3649 dcfg->fcoe_en = false;
3650 dcfg->link_speed = adapter->link_speed;
3653 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3654 dcfg->watchdog_timer, dcfg->link_speed);
3656 hw->mac.ops.dmac_config(hw);
3658 } /* ixgbe_config_dmac */
3660 /************************************************************************
 * ixgbe_enable_intr
3662 ************************************************************************/
/*
 * Unmask interrupts: build the EIMS mask per MAC type (ECC, thermal, SDP
 * pins), add feature causes (fan fail, mailbox, flow director), write
 * EIMS, set EIAC auto-clear for MSI-X, then enable every queue vector
 * and flush.
 * NOTE(review): declarations of mask/fwsm, switch break/default lines,
 * and intermediate braces are missing from this view (numbering gaps).
 */
3664 ixgbe_enable_intr(struct adapter *adapter)
3666 struct ixgbe_hw *hw = &adapter->hw;
3667 struct ix_queue *que = adapter->queues;
 /* Start from everything-enabled minus the per-queue bits (set later). */
3670 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3672 switch (adapter->hw.mac.type) {
3673 case ixgbe_mac_82599EB:
3674 mask |= IXGBE_EIMS_ECC;
3675 /* Temperature sensor on some adapters */
3676 mask |= IXGBE_EIMS_GPI_SDP0;
3677 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3678 mask |= IXGBE_EIMS_GPI_SDP1;
3679 mask |= IXGBE_EIMS_GPI_SDP2;
3681 case ixgbe_mac_X540:
3682 /* Detect if Thermal Sensor is enabled */
3683 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3684 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3685 mask |= IXGBE_EIMS_TS;
3686 mask |= IXGBE_EIMS_ECC;
3688 case ixgbe_mac_X550:
3689 /* MAC thermal sensor is automatically enabled */
3690 mask |= IXGBE_EIMS_TS;
3691 mask |= IXGBE_EIMS_ECC;
3693 case ixgbe_mac_X550EM_x:
3694 case ixgbe_mac_X550EM_a:
3695 /* Some devices use SDP0 for important information */
3696 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3697 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3698 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3699 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3700 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3701 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3702 mask |= IXGBE_EICR_GPI_SDP0_X540;
3703 mask |= IXGBE_EIMS_ECC;
3709 /* Enable Fan Failure detection */
3710 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3711 mask |= IXGBE_EIMS_GPI_SDP1;
 /* SR-IOV needs mailbox interrupts from VFs. */
3713 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3714 mask |= IXGBE_EIMS_MAILBOX;
3715 /* Enable Flow Director */
3716 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3717 mask |= IXGBE_EIMS_FLOW_DIR;
3719 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3721 /* With MSI-X we use auto clear */
3722 if (adapter->msix_mem) {
3723 mask = IXGBE_EIMS_ENABLE_MASK;
3724 /* Don't autoclear Link */
3725 mask &= ~IXGBE_EIMS_OTHER;
3726 mask &= ~IXGBE_EIMS_LSC;
3727 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3728 mask &= ~IXGBE_EIMS_MAILBOX;
3729 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3733 * Now enable all queues, this is done separately to
3734 * allow for handling the extended (beyond 32) MSI-X
3735 * vectors that can be used by 82599
3737 for (int i = 0; i < adapter->num_queues; i++, que++)
3738 ixgbe_enable_queue(adapter, que->msix);
3740 IXGBE_WRITE_FLUSH(hw);
3743 } /* ixgbe_enable_intr */
3745 /************************************************************************
3746 * ixgbe_disable_intr
3747 ************************************************************************/
/*
 * Mask all interrupts: clear EIAC auto-clear when MSI-X is active, then
 * write the EIMC mask register(s).  82598 has a single EIMC; newer MACs
 * also need the extended EIMC_EX pair for vectors beyond 32.
 * NOTE(review): the `} else {` joining the two EIMC paths is missing from
 * this view (numbering gap at 3755).
 */
3749 ixgbe_disable_intr(struct adapter *adapter)
3751 if (adapter->msix_mem)
3752 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3753 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3754 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3756 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3757 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3758 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
 /* Flush posted writes so the masking takes effect immediately. */
3760 IXGBE_WRITE_FLUSH(&adapter->hw);
3763 } /* ixgbe_disable_intr */
3765 /************************************************************************
3766 * ixgbe_legacy_irq - Legacy Interrupt Service routine
3767 ************************************************************************/
/*
 * Legacy (INTx/MSI) interrupt service routine: reads and dispatches EICR
 * causes — RX/TX work, fan failure, link change, SFP module/MSF events,
 * and external PHY — then re-enables interrupts (or defers to the queue
 * task when more RX work remains).
 * NOTE(review): the eicr==0 spurious-interrupt check, TX lock acquisition,
 * `bool more` declaration, and the `if (more)` around the task enqueue are
 * missing from this view (numbering gaps 3783-3794, 3834-3837).
 */
3769 ixgbe_legacy_irq(void *arg)
3771 struct ix_queue *que = arg;
3772 struct adapter *adapter = que->adapter;
3773 struct ixgbe_hw *hw = &adapter->hw;
3774 struct ifnet *ifp = adapter->ifp;
3775 struct tx_ring *txr = adapter->tx_rings;
3777 u32 eicr, eicr_mask;
3779 /* Silicon errata #26 on 82598 */
 /* Mask first, then read-and-clear EICR. */
3780 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3782 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 /* Spurious-interrupt path (guard lines lost): re-enable and return. */
3786 ixgbe_enable_intr(adapter);
3790 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3791 more = ixgbe_rxeof(que);
 /* TX cleanup path; the TX_LOCK/txeof lines are missing from this view. */
3795 if (!ixgbe_ring_empty(ifp, txr->br))
3796 ixgbe_start_locked(ifp, txr);
3797 IXGBE_TX_UNLOCK(txr);
3800 /* Check for fan failure */
3801 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3802 ixgbe_check_fan_failure(adapter, eicr, true);
3803 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3806 /* Link status change */
3807 if (eicr & IXGBE_EICR_LSC)
3808 taskqueue_enqueue(adapter->tq, &adapter->link_task);
3810 if (ixgbe_is_sfp(hw)) {
3811 /* Pluggable optics-related interrupt */
3812 if (hw->mac.type >= ixgbe_mac_X540)
3813 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3815 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3817 if (eicr & eicr_mask) {
 /* Ack the cause, then let the mod task identify the new module. */
3818 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3819 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
3822 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3823 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3824 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3825 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3826 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3830 /* External PHY interrupt */
3831 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3832 (eicr & IXGBE_EICR_GPI_SDP0_X540))
3833 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
 /* Remaining RX work is deferred to the queue task; otherwise re-enable. */
3836 taskqueue_enqueue(que->tq, &que->que_task);
3838 ixgbe_enable_intr(adapter);
3841 } /* ixgbe_legacy_irq */
3843 /************************************************************************
3844 * ixgbe_free_pci_resources
3845 ************************************************************************/
/*
 * Release all PCI resources: per-queue MSI-X interrupt handlers and IRQ
 * resources, the legacy/link interrupt, the MSI(-X) allocation, the
 * MSI-X table BAR, and the device register BAR.  Safe to call from a
 * failed attach because it bails early when adapter->res is NULL.
 * NOTE(review): declarations of rid/memrid, `que->tag = NULL`, `goto mem`,
 * the `mem:` label, and the trailing resource arguments are missing from
 * this view (numbering gaps).
 */
3847 ixgbe_free_pci_resources(struct adapter *adapter)
3849 struct ix_queue *que = adapter->queues;
3850 device_t dev = adapter->dev;
 /* MSI-X table lives in a different BAR on 82598 vs 82599+. */
3853 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3854 memrid = PCIR_BAR(MSIX_82598_BAR);
3856 memrid = PCIR_BAR(MSIX_82599_BAR);
3859 * There is a slight possibility of a failure mode
3860 * in attach that will result in entering this function
3861 * before interrupt resources have been initialized, and
3862 * in that case we do not want to execute the loops below
3863 * We can detect this reliably by the state of the adapter
3866 if (adapter->res == NULL)
3870 * Release all msix queue resources:
3872 for (int i = 0; i < adapter->num_queues; i++, que++) {
 /* IRQ rid for queue vectors is vector number + 1. */
3873 rid = que->msix + 1;
3874 if (que->tag != NULL) {
3875 bus_teardown_intr(dev, que->res, que->tag);
3878 if (que->res != NULL)
3879 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
3883 if (adapter->tag != NULL) {
3884 bus_teardown_intr(dev, adapter->res, adapter->tag);
3885 adapter->tag = NULL;
3888 /* Clean the Legacy or Link interrupt last */
3889 if (adapter->res != NULL)
3890 bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
3894 if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
3895 (adapter->feat_en & IXGBE_FEATURE_MSIX))
3896 pci_release_msi(dev);
3898 if (adapter->msix_mem != NULL)
3899 bus_release_resource(dev, SYS_RES_MEMORY, memrid,
3902 if (adapter->pci_mem != NULL)
3903 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
3907 } /* ixgbe_free_pci_resources */
3909 /************************************************************************
3910 * ixgbe_set_sysctl_value
3911 ************************************************************************/
/*
 * Register a read-write integer sysctl under the device's sysctl tree,
 * backed by *limit, initialized to 'value', with the given description.
 */
3913 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
3914 const char *description, int *limit, int value)
3917 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3918 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3919 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
3920 } /* ixgbe_set_sysctl_value */
3922 /************************************************************************
3923 * ixgbe_sysctl_flowcntl
3925 * SYSCTL wrapper around setting Flow Control
3926 ************************************************************************/
/*
 * Sysctl handler for flow control: exposes the current mode, and on a
 * write applies the new mode via ixgbe_set_flowcntl() — but only when
 * the value actually changed.
 * NOTE(review): the declarations of error/fc and the two early `return`
 * lines after the checks are missing from this view (numbering gaps).
 */
3928 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3930 struct adapter *adapter;
3933 adapter = (struct adapter *)arg1;
3934 fc = adapter->hw.fc.current_mode;
3936 error = sysctl_handle_int(oidp, &fc, 0, req);
 /* Read-only access or copyin error: nothing more to do. */
3937 if ((error) || (req->newptr == NULL))
3940 /* Don't bother if it's not changed */
3941 if (fc == adapter->hw.fc.current_mode)
3944 return ixgbe_set_flowcntl(adapter, fc);
3945 } /* ixgbe_sysctl_flowcntl */
3947 /************************************************************************
3948 * ixgbe_set_flowcntl - Set flow control
3950 * Flow control values:
3955 ************************************************************************/
/*
 * Apply a flow-control mode to the hardware.  RX/TX pause modes disable
 * per-queue RX drop (pause handles backpressure); fc_none re-enables it
 * on multiqueue so one full queue cannot stall the whole RX engine.
 * NOTE(review): the `switch (fc)` opener, the fc_full case, the default
 * case returning EINVAL, and break lines are missing from this view
 * (numbering gaps 3958-3959, 3962, 3966-3975).
 */
3957 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3960 case ixgbe_fc_rx_pause:
3961 case ixgbe_fc_tx_pause:
3963 adapter->hw.fc.requested_mode = fc;
 /* Pause-based flow control: turn per-queue drop off under multiqueue. */
3964 if (adapter->num_queues > 1)
3965 ixgbe_disable_rx_drop(adapter);
3968 adapter->hw.fc.requested_mode = ixgbe_fc_none;
 /* No flow control: re-enable per-queue drop under multiqueue. */
3969 if (adapter->num_queues > 1)
3970 ixgbe_enable_rx_drop(adapter);
3976 /* Don't autoneg if forcing a value */
3977 adapter->hw.fc.disable_fc_autoneg = TRUE;
3978 ixgbe_fc_enable(&adapter->hw);
3981 } /* ixgbe_set_flowcntl */
3983 /************************************************************************
3984 * ixgbe_enable_rx_drop
3986 * Enable the hardware to drop packets when the buffer is
3987 * full. This is useful with multiqueue, so that no single
3988 * queue being full stalls the entire RX engine. We only
3989 * enable this when Multiqueue is enabled AND Flow Control
3991 ************************************************************************/
/*
 * Set the DROP_EN bit in SRRCTL for every RX ring, and set the QDE
 * (queue drop enable) bit for every VF, so a full queue drops packets
 * instead of stalling the entire RX engine (see banner above).
 */
3993 ixgbe_enable_rx_drop(struct adapter *adapter)
3995 struct ixgbe_hw *hw = &adapter->hw;
3996 struct rx_ring *rxr;
3999 for (int i = 0; i < adapter->num_queues; i++) {
4000 rxr = &adapter->rx_rings[i];
4001 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4002 srrctl |= IXGBE_SRRCTL_DROP_EN;
4003 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4006 /* enable drop for each vf */
4007 for (int i = 0; i < adapter->num_vfs; i++) {
4008 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4009 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4012 } /* ixgbe_enable_rx_drop */
4014 /************************************************************************
4015 * ixgbe_disable_rx_drop
4016 ************************************************************************/
/*
 * Inverse of ixgbe_enable_rx_drop(): clear DROP_EN in SRRCTL for every
 * RX ring and write QDE without the enable bit for every VF, so full
 * queues apply backpressure (used with pause-based flow control).
 */
4018 ixgbe_disable_rx_drop(struct adapter *adapter)
4020 struct ixgbe_hw *hw = &adapter->hw;
4021 struct rx_ring *rxr;
4024 for (int i = 0; i < adapter->num_queues; i++) {
4025 rxr = &adapter->rx_rings[i];
4026 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4027 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4028 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4031 /* disable drop for each vf */
4032 for (int i = 0; i < adapter->num_vfs; i++) {
4033 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4034 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4036 } /* ixgbe_disable_rx_drop */
4038 /************************************************************************
4039 * ixgbe_sysctl_advertise
4041 * SYSCTL wrapper around setting advertised speed
4042 ************************************************************************/
/*
 * Sysctl handler for advertised link speed: exposes adapter->advertise
 * and applies writes via ixgbe_set_advertise() (bitmask semantics are
 * documented at ixgbe_set_advertise).
 * NOTE(review): the early `return` after the error/newptr check is
 * missing from this view (numbering gaps 4054-4055).
 */
4044 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4046 struct adapter *adapter;
4047 int error, advertise;
4049 adapter = (struct adapter *)arg1;
4050 advertise = adapter->advertise;
4052 error = sysctl_handle_int(oidp, &advertise, 0, req);
 /* Read-only access or copyin error: nothing more to do. */
4053 if ((error) || (req->newptr == NULL))
4056 return ixgbe_set_advertise(adapter, advertise);
4057 } /* ixgbe_sysctl_advertise */
4059 /************************************************************************
4060 * ixgbe_set_advertise - Control advertised link speed
4063 * 0x1 - advertise 100 Mb
4064 * 0x2 - advertise 1G
4065 * 0x4 - advertise 10G
4066 * 0x8 - advertise 10 Mb (yes, Mb)
4067 ************************************************************************/
/*
 * Validate and apply an advertised-speed bitmask (0x1=100Mb, 0x2=1G,
 * 0x4=10G, 0x8=10Mb — see banner above): rejects backplane media and
 * non-copper/non-multispeed-fiber PHYs, checks each requested bit
 * against the MAC's reported link capabilities, then calls setup_link.
 * NOTE(review): assignments of dev/hw, the per-check `return (EINVAL)`
 * lines, and the final `return (0)` are missing from this view
 * (numbering gaps, e.g. 4080-4084, 4113-4114, 4142-4143).
 */
4069 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4072 struct ixgbe_hw *hw;
4073 ixgbe_link_speed speed = 0;
4074 ixgbe_link_speed link_caps = 0;
4075 s32 err = IXGBE_NOT_IMPLEMENTED;
4076 bool negotiate = FALSE;
4078 /* Checks to validate new value */
4079 if (adapter->advertise == advertise) /* no change */
4085 /* No speed changes for backplane media */
4086 if (hw->phy.media_type == ixgbe_media_type_backplane)
4089 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4090 (hw->phy.multispeed_fiber))) {
4091 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
 /* Bitmask range check: at least one bit, none above 0x8. */
4095 if (advertise < 0x1 || advertise > 0xF) {
4096 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4100 if (hw->mac.ops.get_link_capabilities) {
4101 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4103 if (err != IXGBE_SUCCESS) {
4104 device_printf(dev, "Unable to determine supported advertise speeds\n");
4109 /* Set new value and report new advertised mode */
4110 if (advertise & 0x1) {
4111 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4112 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4115 speed |= IXGBE_LINK_SPEED_100_FULL;
4117 if (advertise & 0x2) {
4118 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4119 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4122 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4124 if (advertise & 0x4) {
4125 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4126 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4129 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4131 if (advertise & 0x8) {
4132 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4133 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4136 speed |= IXGBE_LINK_SPEED_10_FULL;
 /* All requested speeds validated: push to hardware and cache the mask. */
4139 hw->mac.autotry_restart = TRUE;
4140 hw->mac.ops.setup_link(hw, speed, TRUE);
4141 adapter->advertise = advertise;
4144 } /* ixgbe_set_advertise */
4146 /************************************************************************
4147 * ixgbe_get_advertise - Get current advertised speed settings
4149 * Formatted for sysctl usage.
4151 * 0x1 - advertise 100 Mb
4152 * 0x2 - advertise 1G
4153 * 0x4 - advertise 10G
4154 * 0x8 - advertise 10 Mb (yes, Mb)
4155 ************************************************************************/
4157 ixgbe_get_advertise(struct adapter *adapter)
/*
 * Build the sysctl-format advertised-speed bitmask (1 = 100Mb, 2 = 1G,
 * 4 = 10G, 8 = 10Mb) from the MAC's reported link capabilities.
 * Returns 0 (presumably — tail truncated in this listing) for media
 * types where advertisement is meaningless.
 */
4159 struct ixgbe_hw *hw = &adapter->hw;
4161 ixgbe_link_speed link_caps = 0;
4163 bool negotiate = FALSE;
4166 * Advertised speed means nothing unless it's copper or
4169 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4170 !(hw->phy.multispeed_fiber))
4173 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4174 if (err != IXGBE_SUCCESS)
/* Translate IXGBE_LINK_SPEED_* capability bits into the sysctl encoding. */
4178 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4179 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
4180 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
4181 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
4184 } /* ixgbe_get_advertise */
4186 /************************************************************************
4187 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4190 * 0/1 - off / on (use default value of 1000)
4192 * Legal timer values are:
4193 * 50,100,250,500,1000,2000,5000,10000
4195 * Turning off interrupt moderation will also turn this off.
4196 ************************************************************************/
4198 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl handler for DMA Coalescing: 0 disables, 1 enables with the
 * default 1000us timer, and the listed legal timer values are stored
 * directly. Re-inits the interface if it is running so the new setting
 * takes effect.
 */
4200 struct adapter *adapter = (struct adapter *)arg1;
4201 struct ifnet *ifp = adapter->ifp;
4205 newval = adapter->dmac;
4206 error = sysctl_handle_int(oidp, &newval, 0, req);
4207 if ((error) || (req->newptr == NULL))
4216 /* Enable and use default */
4217 adapter->dmac = 1000;
4227 /* Legal values - allow */
4228 adapter->dmac = newval;
4231 /* Do nothing, illegal value */
4235 /* Re-initialize hardware if it's already running */
4236 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4237 ixgbe_init(adapter);
4240 } /* ixgbe_sysctl_dmac */
4243 /************************************************************************
4244 * ixgbe_sysctl_power_state
4246 * Sysctl to test power states
4248 * 0 - set device to D0
4249 * 3 - set device to D3
4250 * (none) - get current device power state
4251 ************************************************************************/
4253 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
/*
 * Debug sysctl to force PCI power-state transitions: writing 3 suspends
 * the device (D0 -> D3), writing 0 resumes it (D3 -> D0); a read reports
 * the current state.
 */
4255 struct adapter *adapter = (struct adapter *)arg1;
4256 device_t dev = adapter->dev;
4257 int curr_ps, new_ps, error = 0;
4259 curr_ps = new_ps = pci_get_powerstate(dev);
4261 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4262 if ((error) || (req->newptr == NULL))
4265 if (new_ps == curr_ps)
/* Only the D0<->D3 transitions are supported; others fall through. */
4268 if (new_ps == 3 && curr_ps == 0)
4269 error = DEVICE_SUSPEND(dev);
4270 else if (new_ps == 0 && curr_ps == 3)
4271 error = DEVICE_RESUME(dev);
4275 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4278 } /* ixgbe_sysctl_power_state */
4281 /************************************************************************
4282 * ixgbe_sysctl_wol_enable
4284 * Sysctl to enable/disable the WoL capability,
4285 * if supported by the adapter.
4290 ************************************************************************/
4292 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl handler to toggle Wake-on-LAN. The written value is coerced to
 * 0/1; enabling is refused when the adapter lacks WoL support.
 */
4294 struct adapter *adapter = (struct adapter *)arg1;
4295 struct ixgbe_hw *hw = &adapter->hw;
4296 int new_wol_enabled;
4299 new_wol_enabled = hw->wol_enabled;
4300 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4301 if ((error) || (req->newptr == NULL))
/* Normalize any non-zero write to 1. */
4303 new_wol_enabled = !!(new_wol_enabled);
4304 if (new_wol_enabled == hw->wol_enabled)
4307 if (new_wol_enabled > 0 && !adapter->wol_support)
4310 hw->wol_enabled = new_wol_enabled;
4313 } /* ixgbe_sysctl_wol_enable */
4315 /************************************************************************
4316 * ixgbe_sysctl_wufc - Wake Up Filter Control
4318 * Sysctl to enable/disable the types of packets that the
4319 * adapter will wake up on upon receipt.
4321 * 0x1 - Link Status Change
4322 * 0x2 - Magic Packet
4323 * 0x4 - Direct Exact
4324 * 0x8 - Directed Multicast
4326 * 0x20 - ARP/IPv4 Request Packet
4327 * 0x40 - Direct IPv4 Packet
4328 * 0x80 - Direct IPv6 Packet
4330 * Settings not listed above will cause the sysctl to return an error.
4331 ************************************************************************/
4333 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl handler for the Wake Up Filter Control bitmask (see the banner
 * above for bit meanings). Only the low byte is user-settable; any bits
 * above 0xff in the written value are rejected.
 */
4335 struct adapter *adapter = (struct adapter *)arg1;
4339 new_wufc = adapter->wufc;
4341 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4342 if ((error) || (req->newptr == NULL))
4344 if (new_wufc == adapter->wufc)
4347 if (new_wufc & 0xffffff00)
/* Preserve the adapter's existing upper filter bits; merge in the new low byte. */
4351 new_wufc |= (0xffffff & adapter->wufc);
4352 adapter->wufc = new_wufc;
4355 } /* ixgbe_sysctl_wufc */
4358 /************************************************************************
4359 * ixgbe_sysctl_print_rss_config
4360 ************************************************************************/
4362 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
/*
 * Debug sysctl (IXGBE_DEBUG builds): dumps the RSS redirection table
 * (RETA, plus the extended ERETA registers past entry 32) into an sbuf
 * for the sysctl reader.
 */
4364 struct adapter *adapter = (struct adapter *)arg1;
4365 struct ixgbe_hw *hw = &adapter->hw;
4366 device_t dev = adapter->dev;
4368 int error = 0, reta_size;
4371 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4373 device_printf(dev, "Could not allocate sbuf for output.\n");
4377 // TODO: use sbufs to make a string to print out
4378 /* Set multiplier for RETA setup and table size based on MAC */
/* X550-class MACs have a larger redirection table than earlier parts. */
4379 switch (adapter->hw.mac.type) {
4380 case ixgbe_mac_X550:
4381 case ixgbe_mac_X550EM_x:
4382 case ixgbe_mac_X550EM_a:
4390 /* Print out the redirection table */
4391 sbuf_cat(buf, "\n");
4392 for (int i = 0; i < reta_size; i++) {
4394 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4395 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
/* Entries beyond the first 32 live in the ERETA register bank. */
4397 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4398 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4402 // TODO: print more config
4404 error = sbuf_finish(buf);
4406 device_printf(dev, "Error finishing sbuf: %d\n", error);
4411 } /* ixgbe_sysctl_print_rss_config */
4412 #endif /* IXGBE_DEBUG */
4414 /************************************************************************
4415 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4417 * For X552/X557-AT devices using an external PHY
4418 ************************************************************************/
4420 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4422 struct adapter *adapter = (struct adapter *)arg1;
4423 struct ixgbe_hw *hw = &adapter->hw;
4426 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4427 device_printf(adapter->dev,
4428 "Device has no supported external thermal sensor.\n");
4432 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4433 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4434 device_printf(adapter->dev,
4435 "Error reading from PHY's current temperature register\n");
4439 /* Shift temp for output */
4442 return (sysctl_handle_int(oidp, NULL, reg, req));
4443 } /* ixgbe_sysctl_phy_temp */
4445 /************************************************************************
4446 * ixgbe_sysctl_phy_overtemp_occurred
4448 * Reports (directly from the PHY) whether the current PHY
4449 * temperature is over the overtemp threshold.
4450 ************************************************************************/
4452 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4454 struct adapter *adapter = (struct adapter *)arg1;
4455 struct ixgbe_hw *hw = &adapter->hw;
4458 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4459 device_printf(adapter->dev,
4460 "Device has no supported external thermal sensor.\n");
4464 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4465 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4466 device_printf(adapter->dev,
4467 "Error reading from PHY's temperature status register\n");
4471 /* Get occurrence bit */
4472 reg = !!(reg & 0x4000);
4474 return (sysctl_handle_int(oidp, 0, reg, req));
4475 } /* ixgbe_sysctl_phy_overtemp_occurred */
4477 /************************************************************************
4478 * ixgbe_sysctl_eee_state
4480 * Sysctl to set EEE power saving feature
4484 * (none) - get current device EEE state
4485 ************************************************************************/
4487 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl handler for Energy Efficient Ethernet: read reports whether
 * EEE is enabled; writing 0/1 (bounds-checked, and only when the
 * adapter's feature capabilities include EEE) reprograms the MAC and
 * restarts the interface so autoneg picks up the change.
 */
4489 struct adapter *adapter = (struct adapter *)arg1;
4490 device_t dev = adapter->dev;
4491 int curr_eee, new_eee, error = 0;
/* Current state is derived from the feature-enabled bitmask. */
4494 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4496 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4497 if ((error) || (req->newptr == NULL))
4501 if (new_eee == curr_eee)
4505 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4508 /* Bounds checking */
4509 if ((new_eee < 0) || (new_eee > 1))
4512 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4514 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4518 /* Restart auto-neg */
4519 ixgbe_init(adapter);
4521 device_printf(dev, "New EEE state: %d\n", new_eee);
4523 /* Cache new value */
4525 adapter->feat_en |= IXGBE_FEATURE_EEE;
4527 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4530 } /* ixgbe_sysctl_eee_state */
4532 /************************************************************************
4533 * ixgbe_init_device_features
4534 ************************************************************************/
4536 ixgbe_init_device_features(struct adapter *adapter)
/*
 * Populate adapter->feat_cap (what the hardware supports) and
 * adapter->feat_en (what the driver actually enables) based on MAC
 * type, device ID, and the global tunables. Enablement is always a
 * subset of capability, and features whose prerequisites are missing
 * (e.g. anything needing MSI-X) are stripped at the end.
 */
/* Baseline capabilities common to every supported MAC. */
4538 adapter->feat_cap = IXGBE_FEATURE_NETMAP
4541 | IXGBE_FEATURE_MSIX
4542 | IXGBE_FEATURE_LEGACY_IRQ
4543 | IXGBE_FEATURE_LEGACY_TX;
4545 /* Set capabilities first... */
4546 switch (adapter->hw.mac.type) {
4547 case ixgbe_mac_82598EB:
4548 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4549 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4551 case ixgbe_mac_X540:
4552 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4553 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
/* Bypass is only present on the bypass SKU, and only on function 0. */
4554 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4555 (adapter->hw.bus.func == 0))
4556 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4558 case ixgbe_mac_X550:
4559 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4560 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4561 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4563 case ixgbe_mac_X550EM_x:
4564 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4565 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4566 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4567 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4569 case ixgbe_mac_X550EM_a:
4570 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4571 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
/* X550EM_a cannot use legacy (INTx) interrupts. */
4572 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4573 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4574 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4575 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4576 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4579 case ixgbe_mac_82599EB:
4580 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4581 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4582 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4583 (adapter->hw.bus.func == 0))
4584 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4585 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4586 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4592 /* Enabled by default... */
4593 /* Fan failure detection */
4594 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4595 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4597 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4598 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4600 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4601 adapter->feat_en |= IXGBE_FEATURE_EEE;
4602 /* Thermal Sensor */
4603 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4604 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4606 /* Enabled via global sysctl... */
4608 if (ixgbe_enable_fdir) {
4609 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4610 adapter->feat_en |= IXGBE_FEATURE_FDIR;
4612 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
4614 /* Legacy (single queue) transmit */
4615 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
4616 ixgbe_enable_legacy_tx)
4617 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
4619 * Message Signal Interrupts - Extended (MSI-X)
4620 * Normal MSI is only enabled if MSI-X calls fail.
4622 if (!ixgbe_enable_msix)
4623 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4624 /* Receive-Side Scaling (RSS) */
4625 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4626 adapter->feat_en |= IXGBE_FEATURE_RSS;
4628 /* Disable features with unmet dependencies... */
/* RSS and SR-IOV both require MSI-X vectors. */
4630 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4631 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4632 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4633 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4634 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4636 } /* ixgbe_init_device_features */
4638 /************************************************************************
4639 * ixgbe_probe - Device identification routine
4641 * Determines if the driver should be loaded on
4642 * adapter based on its PCI vendor/device ID.
4644 * return BUS_PROBE_DEFAULT on success, positive on failure
4645 ************************************************************************/
4647 ixgbe_probe(device_t dev)
/*
 * Device probe: match this device's PCI vendor/device/subvendor/
 * subdevice IDs against ixgbe_vendor_info_array. On a match, set the
 * device description string and claim the device with
 * BUS_PROBE_DEFAULT. A subvendor/subdevice of 0 in the table acts as
 * a wildcard.
 */
4649 ixgbe_vendor_info_t *ent;
4651 u16 pci_vendor_id = 0;
4652 u16 pci_device_id = 0;
4653 u16 pci_subvendor_id = 0;
4654 u16 pci_subdevice_id = 0;
4655 char adapter_name[256];
4657 INIT_DEBUGOUT("ixgbe_probe: begin");
/* Quick rejection for non-Intel devices before scanning the table. */
4659 pci_vendor_id = pci_get_vendor(dev);
4660 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
4663 pci_device_id = pci_get_device(dev);
4664 pci_subvendor_id = pci_get_subvendor(dev);
4665 pci_subdevice_id = pci_get_subdevice(dev);
4667 ent = ixgbe_vendor_info_array;
4668 while (ent->vendor_id != 0) {
4669 if ((pci_vendor_id == ent->vendor_id) &&
4670 (pci_device_id == ent->device_id) &&
4671 ((pci_subvendor_id == ent->subvendor_id) ||
4672 (ent->subvendor_id == 0)) &&
4673 ((pci_subdevice_id == ent->subdevice_id) ||
4674 (ent->subdevice_id == 0))) {
4675 sprintf(adapter_name, "%s, Version - %s",
4676 ixgbe_strings[ent->index],
4677 ixgbe_driver_version);
4678 device_set_desc_copy(dev, adapter_name);
4679 ++ixgbe_total_ports;
4680 return (BUS_PROBE_DEFAULT);
4689 /************************************************************************
4690 * ixgbe_ioctl - Ioctl entry point
4692 * Called when the user wants to configure the interface.
4694 * return 0 on success, positive on failure
4695 ************************************************************************/
4697 ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
/*
 * Interface ioctl entry point: handles address assignment, MTU and
 * flag changes, multicast list updates, media selection, capability
 * toggles, and (FreeBSD >= 1100036) SFP I2C reads; everything else is
 * passed to ether_ioctl().
 * NOTE(review): case labels of the switch are missing from this
 * listing — verify the command dispatch against the upstream driver.
 */
4699 struct adapter *adapter = ifp->if_softc;
4700 struct ifreq *ifr = (struct ifreq *) data;
4701 #if defined(INET) || defined(INET6)
4702 struct ifaddr *ifa = (struct ifaddr *)data;
4705 bool avoid_reset = FALSE;
4710 if (ifa->ifa_addr->sa_family == AF_INET)
4714 if (ifa->ifa_addr->sa_family == AF_INET6)
4718 * Calling init results in link renegotiation,
4719 * so we avoid doing it when possible.
4722 ifp->if_flags |= IFF_UP;
4723 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
4724 ixgbe_init(adapter);
4726 if (!(ifp->if_flags & IFF_NOARP))
4727 arp_ifinit(ifp, ifa);
4730 error = ether_ioctl(ifp, command, data);
4733 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
4734 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
4737 IXGBE_CORE_LOCK(adapter);
4738 ifp->if_mtu = ifr->ifr_mtu;
/* Frame size follows MTU plus the fixed header overhead. */
4739 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
4740 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4741 ixgbe_init_locked(adapter);
4742 ixgbe_recalculate_max_frame(adapter);
4743 IXGBE_CORE_UNLOCK(adapter);
4747 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
4748 IXGBE_CORE_LOCK(adapter);
4749 if (ifp->if_flags & IFF_UP) {
4750 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only promisc/allmulti changes need handling while running. */
4751 if ((ifp->if_flags ^ adapter->if_flags) &
4752 (IFF_PROMISC | IFF_ALLMULTI)) {
4753 ixgbe_set_promisc(adapter);
4756 ixgbe_init_locked(adapter);
4758 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4759 ixgbe_stop(adapter);
4760 adapter->if_flags = ifp->if_flags;
4761 IXGBE_CORE_UNLOCK(adapter);
4765 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
/* Re-program the multicast filter with interrupts masked. */
4766 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4767 IXGBE_CORE_LOCK(adapter);
4768 ixgbe_disable_intr(adapter);
4769 ixgbe_set_multi(adapter);
4770 ixgbe_enable_intr(adapter);
4771 IXGBE_CORE_UNLOCK(adapter);
4776 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
4777 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
4781 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
4783 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4788 /* HW cannot turn these on/off separately */
4789 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4790 ifp->if_capenable ^= IFCAP_RXCSUM;
4791 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
4793 if (mask & IFCAP_TXCSUM)
4794 ifp->if_capenable ^= IFCAP_TXCSUM;
4795 if (mask & IFCAP_TXCSUM_IPV6)
4796 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
4797 if (mask & IFCAP_TSO4)
4798 ifp->if_capenable ^= IFCAP_TSO4;
4799 if (mask & IFCAP_TSO6)
4800 ifp->if_capenable ^= IFCAP_TSO6;
4801 if (mask & IFCAP_LRO)
4802 ifp->if_capenable ^= IFCAP_LRO;
4803 if (mask & IFCAP_VLAN_HWTAGGING)
4804 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4805 if (mask & IFCAP_VLAN_HWFILTER)
4806 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
4807 if (mask & IFCAP_VLAN_HWTSO)
4808 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
/* Capability changes require a re-init to reach the hardware. */
4810 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4811 IXGBE_CORE_LOCK(adapter);
4812 ixgbe_init_locked(adapter);
4813 IXGBE_CORE_UNLOCK(adapter);
4815 VLAN_CAPABILITIES(ifp);
4818 #if __FreeBSD_version >= 1100036
4821 struct ixgbe_hw *hw = &adapter->hw;
4822 struct ifi2creq i2c;
4825 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
4826 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
/* Only the standard SFP EEPROM/diagnostic addresses are allowed. */
4829 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4833 if (i2c.len > sizeof(i2c.data)) {
4838 for (i = 0; i < i2c.len; i++)
4839 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
4840 i2c.dev_addr, &i2c.data[i]);
4841 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
4846 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
4847 error = ether_ioctl(ifp, command, data);
4854 /************************************************************************
4855 * ixgbe_check_fan_failure
4856 ************************************************************************/
4858 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
/*
 * Check the SDP1 fan-failure indication in 'reg' and log a critical
 * message if it is asserted. The bit position differs between the
 * interrupt-cause register and the ESDP register, hence 'in_interrupt'.
 */
4862 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4866 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4867 } /* ixgbe_check_fan_failure */
4869 /************************************************************************
4871 ************************************************************************/
4873 ixgbe_handle_que(void *context, int pending)
/*
 * Deferred (taskqueue) per-queue handler: while the interface is
 * running, drains the TX ring, then re-enables either this queue's
 * MSI-X vector or, in the legacy-interrupt case (que->res == NULL),
 * all interrupts.
 */
4875 struct ix_queue *que = context;
4876 struct adapter *adapter = que->adapter;
4877 struct tx_ring *txr = que->txr;
4878 struct ifnet *ifp = adapter->ifp;
4880 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4884 if (!ixgbe_ring_empty(ifp, txr->br))
4885 ixgbe_start_locked(ifp, txr);
4886 IXGBE_TX_UNLOCK(txr);
4889 /* Re-enable this interrupt */
4890 if (que->res != NULL)
4891 ixgbe_enable_queue(adapter, que->msix);
4893 ixgbe_enable_intr(adapter);
4896 } /* ixgbe_handle_que */
4900 /************************************************************************
4901 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
4902 ************************************************************************/
4904 ixgbe_allocate_legacy(struct adapter *adapter)
/*
 * Set up a single legacy (INTx) or MSI interrupt: allocate the IRQ
 * resource, create the per-queue and link/SFP/MSF/PHY taskqueues, and
 * register ixgbe_legacy_irq as the fast handler. On handler-setup
 * failure the taskqueues are freed again.
 */
4906 device_t dev = adapter->dev;
4907 struct ix_queue *que = adapter->queues;
4908 struct tx_ring *txr = adapter->tx_rings;
4911 /* We allocate a single interrupt resource */
4912 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
4913 &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
4914 if (adapter->res == NULL) {
4916 "Unable to allocate bus resource: interrupt\n");
4921 * Try allocating a fast interrupt and the associated deferred
4922 * processing contexts.
/* Deferred TX start task only needed for the multiqueue TX path. */
4924 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
4925 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
4926 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
4927 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
4928 taskqueue_thread_enqueue, &que->tq);
4929 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
4930 device_get_nameunit(adapter->dev));
4932 /* Tasklets for Link, SFP and Multispeed Fiber */
4933 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
4934 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
4935 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
4936 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
4937 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
4938 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
4939 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
4940 taskqueue_thread_enqueue, &adapter->tq);
4941 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
4942 device_get_nameunit(adapter->dev));
4944 if ((error = bus_setup_intr(dev, adapter->res,
4945 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
4946 &adapter->tag)) != 0) {
4948 "Failed to register fast interrupt handler: %d\n", error);
/* Undo the taskqueue setup on failure. */
4949 taskqueue_free(que->tq);
4950 taskqueue_free(adapter->tq);
4956 /* For simplicity in the handlers */
4957 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
4960 } /* ixgbe_allocate_legacy */
4963 /************************************************************************
4964 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
4965 ************************************************************************/
4967 ixgbe_allocate_msix(struct adapter *adapter)
/*
 * Set up MSI-X: one vector per RX/TX queue pair plus one for link
 * events. For each queue: allocate the IRQ, install ixgbe_msix_que,
 * optionally bind the vector to the RSS bucket's CPU, and start a
 * per-queue taskqueue. Then allocate and install the link vector and
 * the link/SFP/MSF/mailbox/PHY/FDIR tasklets.
 */
4969 device_t dev = adapter->dev;
4970 struct ix_queue *que = adapter->queues;
4971 struct tx_ring *txr = adapter->tx_rings;
4972 int error, rid, vector = 0;
4974 unsigned int rss_buckets = 0;
4978 * If we're doing RSS, the number of queues needs to
4979 * match the number of RSS buckets that are configured.
4981 * + If there's more queues than RSS buckets, we'll end
4982 * up with queues that get no traffic.
4984 * + If there's more RSS buckets than queues, we'll end
4985 * up having multiple RSS buckets map to the same queue,
4986 * so there'll be some contention.
4988 rss_buckets = rss_getnumbuckets();
4989 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
4990 (adapter->num_queues != rss_buckets)) {
4991 device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
4992 __func__, adapter->num_queues, rss_buckets);
4995 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
4997 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
4998 RF_SHAREABLE | RF_ACTIVE);
4999 if (que->res == NULL) {
5000 device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
5004 /* Set the handler function */
5005 error = bus_setup_intr(dev, que->res,
5006 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
5010 device_printf(dev, "Failed to register QUE handler");
5013 #if __FreeBSD_version >= 800504
5014 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
/* Track which MSI-X vectors belong to queues for EIMS handling. */
5017 adapter->active_queues |= (u64)(1 << que->msix);
5019 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
5021 * The queue ID is used as the RSS layer bucket ID.
5022 * We look up the queue ID -> RSS CPU ID and select
5025 cpu_id = rss_getcpu(i % rss_buckets);
5026 CPU_SETOF(cpu_id, &cpu_mask);
5029 * Bind the MSI-X vector, and thus the
5030 * rings to the corresponding CPU.
5032 * This just happens to match the default RSS
5033 * round-robin bucket -> queue -> CPU allocation.
5035 if (adapter->num_queues > 1)
5038 if (adapter->num_queues > 1)
5039 bus_bind_intr(dev, que->res, cpu_id);
5041 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5042 device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
5045 device_printf(dev, "Bound queue %d to cpu %d\n", i,
5047 #endif /* IXGBE_DEBUG */
5050 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5051 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
5053 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
5054 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
5055 taskqueue_thread_enqueue, &que->tq);
5056 #if __FreeBSD_version < 1100000
5057 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
5058 device_get_nameunit(adapter->dev), i);
/* Newer kernels can pin the taskqueue thread to the RSS CPU. */
5060 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5061 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5062 &cpu_mask, "%s (bucket %d)",
5063 device_get_nameunit(adapter->dev), cpu_id);
5065 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5066 NULL, "%s:q%d", device_get_nameunit(adapter->dev),
/* The link vector comes after all queue vectors. */
5072 adapter->link_rid = vector + 1;
5073 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
5074 &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
5075 if (!adapter->res) {
5077 "Unable to allocate bus resource: Link interrupt [%d]\n",
5081 /* Set the link handler function */
5082 error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
5083 NULL, ixgbe_msix_link, adapter, &adapter->tag);
5085 adapter->res = NULL;
5086 device_printf(dev, "Failed to register LINK handler");
5089 #if __FreeBSD_version >= 800504
5090 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
5092 adapter->vector = vector;
5093 /* Tasklets for Link, SFP and Multispeed Fiber */
5094 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
5095 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
5096 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
5097 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5098 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
5099 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
5100 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5101 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
5102 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
5103 taskqueue_thread_enqueue, &adapter->tq);
5104 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
5105 device_get_nameunit(adapter->dev));
5108 } /* ixgbe_allocate_msix */
5110 /************************************************************************
5111 * ixgbe_configure_interrupts
5113 * Setup MSI-X, MSI, or legacy interrupts (in that order).
5114 * This will also depend on user settings.
5115 ************************************************************************/
5117 ixgbe_configure_interrupts(struct adapter *adapter)
/*
 * Decide the interrupt mode, trying MSI-X first, then MSI, then legacy
 * INTx, honoring the feature capability bits and the
 * ixgbe_enable_msix / ixgbe_num_queues tunables. Sets
 * adapter->num_queues, adapter->link_rid, and the corresponding
 * IXGBE_FEATURE_* enable bit; strips RSS/SR-IOV when MSI-X is
 * unavailable.
 */
5119 device_t dev = adapter->dev;
5120 int rid, want, queues, msgs;
5122 /* Default to 1 queue if MSI-X setup fails */
5123 adapter->num_queues = 1;
5125 /* Override by tuneable */
5126 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
5129 /* First try MSI-X */
5130 msgs = pci_msix_count(dev);
5133 rid = PCIR_BAR(MSIX_82598_BAR);
5134 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5136 if (adapter->msix_mem == NULL) {
5137 rid += 4; /* 82599 maps in higher BAR */
5138 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
5141 if (adapter->msix_mem == NULL) {
5142 /* May not be enabled */
5143 device_printf(adapter->dev, "Unable to map MSI-X table.\n");
5147 /* Figure out a reasonable auto config value */
/* One vector is reserved for link, hence msgs - 1 for queues. */
5148 queues = min(mp_ncpus, msgs - 1);
5149 /* If we're doing RSS, clamp at the number of RSS buckets */
5150 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5151 queues = min(queues, rss_getnumbuckets());
5152 if (ixgbe_num_queues > queues) {
5153 device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
5154 ixgbe_num_queues = queues;
5157 if (ixgbe_num_queues != 0)
5158 queues = ixgbe_num_queues;
5159 /* Set max queues to 8 when autoconfiguring */
5161 queues = min(queues, 8);
5163 /* reflect correct sysctl value */
5164 ixgbe_num_queues = queues;
5167 * Want one vector (RX/TX pair) per queue
5168 * plus an additional for Link.
5174 device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
/* Success requires the PCI layer to grant exactly the wanted count. */
5178 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
5179 device_printf(adapter->dev,
5180 "Using MSI-X interrupts with %d vectors\n", msgs);
5181 adapter->num_queues = queues;
5182 adapter->feat_en |= IXGBE_FEATURE_MSIX;
5186 * MSI-X allocation failed or provided us with
5187 * less vectors than needed. Free MSI-X resources
5188 * and we'll try enabling MSI.
5190 pci_release_msi(dev);
5193 /* Without MSI-X, some features are no longer supported */
5194 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5195 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
5196 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5197 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
5199 if (adapter->msix_mem != NULL) {
5200 bus_release_resource(dev, SYS_RES_MEMORY, rid,
5202 adapter->msix_mem = NULL;
5205 if (pci_alloc_msi(dev, &msgs) == 0) {
5206 adapter->feat_en |= IXGBE_FEATURE_MSI;
5207 adapter->link_rid = 1;
5208 device_printf(adapter->dev, "Using an MSI interrupt\n");
5212 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
5213 device_printf(adapter->dev,
5214 "Device does not support legacy interrupts.\n");
5218 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
/* rid 0 selects the shared legacy INTx line. */
5219 adapter->link_rid = 0;
5220 device_printf(adapter->dev, "Using a Legacy interrupt\n");
5223 } /* ixgbe_configure_interrupts */
5226 /************************************************************************
5227 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
5229 * Done outside of interrupt context since the driver might sleep
5230 ************************************************************************/
5232 ixgbe_handle_link(void *context, int pending)
/*
 * Link tasklet (runs from a taskqueue, not interrupt context, because
 * the check may sleep): refresh link speed/state, propagate it to the
 * stack, then unmask the link-status-change interrupt.
 */
5234 struct adapter *adapter = context;
5235 struct ixgbe_hw *hw = &adapter->hw;
5237 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
5238 ixgbe_update_link_status(adapter);
5240 /* Re-enable link interrupts */
5241 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
5242 } /* ixgbe_handle_link */
5244 /************************************************************************
5245 * ixgbe_rearm_queues
5246 ************************************************************************/
5248 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
/*
 * Trigger a software interrupt for the queues in the 'queues' bitmask
 * by writing the interrupt-cause-set register(s). 82598 uses a single
 * EICS register; later MACs split the 64-bit mask across EICS_EX(0)
 * (low 32 bits) and EICS_EX(1) (high 32 bits).
 */
5252 switch (adapter->hw.mac.type) {
5253 case ixgbe_mac_82598EB:
5254 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5255 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5257 case ixgbe_mac_82599EB:
5258 case ixgbe_mac_X540:
5259 case ixgbe_mac_X550:
5260 case ixgbe_mac_X550EM_x:
5261 case ixgbe_mac_X550EM_a:
5262 mask = (queues & 0xFFFFFFFF);
5263 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5264 mask = (queues >> 32);
5265 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5270 } /* ixgbe_rearm_queues */