1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
44 /************************************************************************
46 ************************************************************************/
/* Driver version string exported by the "ix" driver. */
47 char ixgbe_driver_version[] = "3.2.12-k";
50 /************************************************************************
53 * Used by probe to select devices to load on
54 * Last field stores an index into ixgbe_strings
55 * Last entry must be all 0s
57 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58 ************************************************************************/
/*
 * PCI device table used by ixgbe_probe() to match supported adapters.
 * Each row: { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 * per the header comment above; subvendor/subdevice of 0 match any.
 */
59 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
74 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
75 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
76 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
77 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
78 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
79 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
80 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
81 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
82 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
83 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
105 /* required last entry */
/*
 * NOTE(review): the all-zeros terminator entry and the closing "};" are
 * not visible in this chunk (original lines 106-107 appear to have been
 * dropped by extraction) -- confirm against upstream.
 */
109 /************************************************************************
110 * Table of branding strings
111 ************************************************************************/
/*
 * Branding strings indexed by the String Index field of
 * ixgbe_vendor_info_array; only one entry is defined here.
 * NOTE(review): closing "};" not visible in this chunk -- confirm upstream.
 */
112 static char *ixgbe_strings[] = {
113 "Intel(R) PRO/10GbE PCI-Express Network Driver"
116 /************************************************************************
117 * Function prototypes
118 ************************************************************************/
/* newbus device interface entry points (wired into ix_methods below) */
119 static int ixgbe_probe(device_t);
120 static int ixgbe_attach(device_t);
121 static int ixgbe_detach(device_t);
122 static int ixgbe_shutdown(device_t);
123 static int ixgbe_suspend(device_t);
124 static int ixgbe_resume(device_t);
/* ifnet entry points */
125 static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
126 static void ixgbe_init(void *);
127 static void ixgbe_init_locked(struct adapter *);
128 static void ixgbe_stop(void *);
129 #if __FreeBSD_version >= 1100036
130 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
/*
 * NOTE(review): the matching #endif for the __FreeBSD_version block above
 * (original line 131) is not visible in this chunk -- confirm upstream.
 */
/* hardware setup / media / interrupt configuration helpers */
132 static void ixgbe_init_device_features(struct adapter *);
133 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
134 static void ixgbe_add_media_types(struct adapter *);
135 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
136 static int ixgbe_media_change(struct ifnet *);
137 static int ixgbe_allocate_pci_resources(struct adapter *);
138 static void ixgbe_get_slot_info(struct adapter *);
139 static int ixgbe_allocate_msix(struct adapter *);
140 static int ixgbe_allocate_legacy(struct adapter *);
141 static int ixgbe_configure_interrupts(struct adapter *);
142 static void ixgbe_free_pci_resources(struct adapter *);
143 static void ixgbe_local_timer(void *);
144 static int ixgbe_setup_interface(device_t, struct adapter *);
145 static void ixgbe_config_gpie(struct adapter *);
146 static void ixgbe_config_dmac(struct adapter *);
147 static void ixgbe_config_delay_values(struct adapter *);
148 static void ixgbe_config_link(struct adapter *);
149 static void ixgbe_check_wol_support(struct adapter *);
150 static int ixgbe_setup_low_power_mode(struct adapter *);
151 static void ixgbe_rearm_queues(struct adapter *, u64);
/* TX/RX ring bring-up */
153 static void ixgbe_initialize_transmit_units(struct adapter *);
154 static void ixgbe_initialize_receive_units(struct adapter *);
155 static void ixgbe_enable_rx_drop(struct adapter *);
156 static void ixgbe_disable_rx_drop(struct adapter *);
157 static void ixgbe_initialize_rss_mapping(struct adapter *);
/* runtime control: interrupts, stats, filtering, link */
159 static void ixgbe_enable_intr(struct adapter *, bool);
160 static void ixgbe_disable_intr(struct adapter *, bool);
161 static void ixgbe_update_stats_counters(struct adapter *);
162 static void ixgbe_set_promisc(struct adapter *);
163 static void ixgbe_set_multi(struct adapter *);
164 static void ixgbe_update_link_status(struct adapter *);
165 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
166 static void ixgbe_configure_ivars(struct adapter *);
167 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN hardware support and event hooks */
169 static void ixgbe_setup_vlan_hw_support(struct adapter *);
170 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
171 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
/* sysctl tree construction and tunable plumbing */
173 static void ixgbe_add_device_sysctls(struct adapter *);
174 static void ixgbe_add_hw_stats(struct adapter *);
175 static int ixgbe_set_flowcntl(struct adapter *, int);
176 static int ixgbe_set_advertise(struct adapter *, int);
177 static int ixgbe_get_advertise(struct adapter *);
179 /* Sysctl handlers */
180 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
181 const char *, int *, int);
182 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
183 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
184 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
185 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
186 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
187 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
189 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
190 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
192 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
193 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
194 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
195 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
196 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
197 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
198 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
200 /* Support for pluggable optic modules */
201 static bool ixgbe_sfp_probe(struct adapter *);
203 /* Legacy (single vector) interrupt handler */
204 static void ixgbe_legacy_irq(void *);
206 /* The MSI/MSI-X Interrupt handlers */
207 static void ixgbe_msix_que(void *);
208 static void ixgbe_msix_link(void *);
210 /* Deferred interrupt tasklets */
211 static void ixgbe_handle_que(void *, int);
212 static void ixgbe_handle_link(void *);
213 static void ixgbe_handle_msf(void *);
214 static void ixgbe_handle_mod(void *);
215 static void ixgbe_handle_phy(void *);
216 static void ixgbe_handle_admin_task(void *, int);
219 /************************************************************************
220 * FreeBSD Device Interface Entry Points
221 ************************************************************************/
/*
 * newbus method table binding the driver's entry points.
 * NOTE(review): DEVMETHOD_END and the closing "};" (and likely the #ifdef
 * PCI_IOV guard around the IOV methods) are not visible in this chunk --
 * confirm against upstream.
 */
222 static device_method_t ix_methods[] = {
223 /* Device interface */
224 DEVMETHOD(device_probe, ixgbe_probe),
225 DEVMETHOD(device_attach, ixgbe_attach),
226 DEVMETHOD(device_detach, ixgbe_detach),
227 DEVMETHOD(device_shutdown, ixgbe_shutdown),
228 DEVMETHOD(device_suspend, ixgbe_suspend),
229 DEVMETHOD(device_resume, ixgbe_resume),
/* SR-IOV interface */
231 DEVMETHOD(pci_iov_init, ixgbe_init_iov),
232 DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
233 DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
/*
 * Driver declaration: name "ix", method table above, softc is struct adapter.
 * Registered on the pci bus; depends on pci, ether, and netmap modules.
 * NOTE(review): the closing "};" of ix_driver is not visible in this chunk.
 */
238 static driver_t ix_driver = {
239 "ix", ix_methods, sizeof(struct adapter),
242 devclass_t ix_devclass;
243 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
245 MODULE_DEPEND(ix, pci, 1, 1, 1);
246 MODULE_DEPEND(ix, ether, 1, 1, 1);
248 MODULE_DEPEND(ix, netmap, 1, 1, 1);
252 * TUNEABLE PARAMETERS:
/* Root of the hw.ix sysctl tree; all tunables below hang off it. */
255 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
258 * AIM: Adaptive Interrupt Moderation
259 * which means that the interrupt rate
260 * is varied over time based on the
261 * traffic for that interrupt vector
263 static int ixgbe_enable_aim = TRUE;
264 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
265 "Enable adaptive interrupt moderation");
/* Cap on interrupts/sec when AIM adjusts moderation. */
267 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
268 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
269 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
271 /* How many packets rxeof tries to clean at a time */
272 static int ixgbe_rx_process_limit = 256;
273 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
274 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
276 /* How many packets txeof tries to clean at a time */
277 static int ixgbe_tx_process_limit = 256;
278 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
279 &ixgbe_tx_process_limit, 0,
280 "Maximum number of sent packets to process at a time, -1 means unlimited");
282 /* Flow control setting, default to full */
283 static int ixgbe_flow_control = ixgbe_fc_full;
284 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
285 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
287 /* Advertise Speed, default to 0 (auto) */
288 static int ixgbe_advertise_speed = 0;
289 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
290 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
293 * Smart speed setting, default to on
294 * this only works as a compile option
295 * right now as its during attach, set
296 * this to 'ixgbe_smart_speed_off' to
299 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
302 * MSI-X should be the default for best performance,
303 * but this allows it to be forced off for testing.
305 static int ixgbe_enable_msix = 1;
306 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
307 "Enable MSI-X interrupts");
310 * Number of Queues, can be set to 0,
311 * it then autoconfigures based on the
312 * number of cpus with a max of 8. This
313 * can be overriden manually here.
315 static int ixgbe_num_queues = 0;
316 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
317 "Number of queues to configure, 0 indicates autoconfigure");
320 * Number of TX descriptors per ring,
321 * setting higher than RX as this seems
322 * the better performing choice.
324 static int ixgbe_txd = PERFORM_TXD;
325 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
326 "Number of transmit descriptors per queue");
328 /* Number of RX descriptors per ring */
329 static int ixgbe_rxd = PERFORM_RXD;
330 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
331 "Number of receive descriptors per queue");
334 * Defining this on will allow the use
335 * of unsupported SFP+ modules, note that
336 * doing so you are on your own :)
338 static int allow_unsupported_sfp = FALSE;
339 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
340 &allow_unsupported_sfp, 0,
341 "Allow unsupported SFP modules...use at your own risk");
344 * Not sure if Flow Director is fully baked,
345 * so we'll default to turning it off.
347 static int ixgbe_enable_fdir = 0;
348 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
349 "Enable Flow Director");
351 /* Legacy Transmit (single queue) */
352 static int ixgbe_enable_legacy_tx = 0;
353 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
354 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
356 /* Receive-Side Scaling */
357 static int ixgbe_enable_rss = 1;
358 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
359 "Enable Receive-Side Scaling (RSS)");
361 /* Keep running tab on them for sanity check */
362 static int ixgbe_total_ports;
/*
 * Indirection pointers selecting legacy vs. multiqueue TX paths;
 * presumably assigned during attach based on ixgbe_enable_legacy_tx --
 * the assignment is not visible in this chunk.
 */
364 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
365 static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
/* malloc(9) type tag for this driver's allocations (e.g. the mta array). */
367 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
369 /************************************************************************
370 * ixgbe_initialize_rss_mapping
371 ************************************************************************/
/*
 * Program the hardware RSS redirection table (RETA/ERETA), the RSS hash
 * key registers (RSSRK), and the hash-type selection (MRQC).  When the
 * kernel RSS feature is enabled the key and bucket mapping come from the
 * kernel's rss(9) configuration; otherwise a random key and a simple
 * round-robin queue mapping are used.
 * NOTE(review): extraction has dropped lines from this function (return
 * type line, several "} else {" lines, the switch-case bodies that set
 * table_size/index_mult, and parts of the RETA write loop) -- confirm
 * against upstream before relying on the visible control flow.
 */
373 ixgbe_initialize_rss_mapping(struct adapter *adapter)
375 	struct ixgbe_hw *hw = &adapter->hw;
376 	u32 reta = 0, mrqc, rss_key[10];
377 	int queue_id, table_size, index_mult;
381 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
382 	/* Fetch the configured RSS key */
383 	rss_getkey((uint8_t *)&rss_key);
385 	/* set up random bits */
/* Non-RSS path: random key. (The "} else {" separating the two branches
 * is one of the dropped lines.) */
386 	arc4rand(&rss_key, sizeof(rss_key), 0);
389 	/* Set multiplier for RETA setup and table size based on MAC */
392 	switch (adapter->hw.mac.type) {
393 	case ixgbe_mac_82598EB:
397 	case ixgbe_mac_X550EM_x:
398 	case ixgbe_mac_X550EM_a:
405 	/* Set up the redirection table */
406 	for (i = 0, j = 0; i < table_size; i++, j++) {
407 	if (j == adapter->num_queues)
410 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
412 	* Fetch the RSS bucket id for the given indirection
413 	* entry. Cap it at the number of configured buckets
414 	* (which is num_queues.)
416 	queue_id = rss_get_indirection_to_bucket(i);
417 	queue_id = queue_id % adapter->num_queues;
419 	queue_id = (j * index_mult);
422 	* The low 8 bits are for hash value (n+0);
423 	* The next 8 bits are for hash value (n+1), etc.
/* Pack four 8-bit queue ids per 32-bit RETA register; flush every 4th. */
426 	reta = reta | (((uint32_t)queue_id) << 24);
429 	IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
/* Entries beyond the first 128 go to the extended table on newer MACs. */
431 	IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
437 	/* Now fill our hash function seeds */
438 	for (i = 0; i < 10; i++)
439 	IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
441 	/* Perform hash on these packet types */
442 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
443 	rss_hash_config = rss_gethashconfig();
446 	* Disable UDP - IP fragments aren't currently being handled
447 	* and so we end up with a mix of 2-tuple and 4-tuple
450 	rss_hash_config = RSS_HASHTYPE_RSS_IPV4
451 	| RSS_HASHTYPE_RSS_TCP_IPV4
452 	| RSS_HASHTYPE_RSS_IPV6
453 	| RSS_HASHTYPE_RSS_TCP_IPV6
454 	| RSS_HASHTYPE_RSS_IPV6_EX
455 	| RSS_HASHTYPE_RSS_TCP_IPV6_EX;
/* Translate the rss(9) hash-type mask into MRQC field-enable bits. */
458 	mrqc = IXGBE_MRQC_RSSEN;
459 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
460 	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
461 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
462 	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
463 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
464 	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
465 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
466 	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
467 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
468 	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
469 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
470 	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
471 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
472 	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
473 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
474 	device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
476 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
477 	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
478 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
479 	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
480 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
481 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
482 } /* ixgbe_initialize_rss_mapping */
484 /************************************************************************
485 * ixgbe_initialize_receive_units - Setup receive registers and features.
486 ************************************************************************/
/* Round-up amount for converting rx_mbuf_sz to the SRRCTL BSIZEPKT units. */
487 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/*
 * Program the receive side: broadcast/filter control (FCTRL), jumbo and
 * CRC-strip options (HLREG0), the per-queue descriptor ring base/length,
 * SRRCTL buffer sizing and drop-enable, RDH/RDT pointers, packet-split
 * type (PSRTYPE), RSS mapping, and checksum offload (RXCSUM).
 * NOTE(review): extraction has dropped lines here (return-type line,
 * several "else" lines, the "#ifdef DEV_NETMAP" opener matched by the
 * #endif below, and loop-variable declarations) -- confirm upstream.
 */
490 ixgbe_initialize_receive_units(struct adapter *adapter)
492 	struct rx_ring *rxr = adapter->rx_rings;
493 	struct ixgbe_hw *hw = &adapter->hw;
494 	struct ifnet *ifp = adapter->ifp;
496 	u32 bufsz, fctrl, srrctl, rxcsum;
500 	* Make sure receives are disabled while
501 	* setting up the descriptor ring
503 	ixgbe_disable_rx(hw);
505 	/* Enable broadcasts */
506 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
507 	fctrl |= IXGBE_FCTRL_BAM;
508 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
509 	fctrl |= IXGBE_FCTRL_DPF;
510 	fctrl |= IXGBE_FCTRL_PMCF;
512 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
514 	/* Set for Jumbo Frames? */
515 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
516 	if (ifp->if_mtu > ETHERMTU)
517 	hlreg |= IXGBE_HLREG0_JUMBOEN;
519 	hlreg &= ~IXGBE_HLREG0_JUMBOEN;
522 	/* CRC stripping is conditional in Netmap */
523 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
524 	(ifp->if_capenable & IFCAP_NETMAP) &&
526 	hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
528 	#endif /* DEV_NETMAP */
/* Default (non-netmap) path: strip the Ethernet CRC in hardware. */
529 	hlreg |= IXGBE_HLREG0_RXCRCSTRP;
531 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
/* Convert the mbuf buffer size into SRRCTL's 1KB-granularity field. */
533 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
534 	IXGBE_SRRCTL_BSIZEPKT_SHIFT;
536 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
537 	u64 rdba = rxr->rxdma.dma_paddr;
540 	/* Setup the Base and Length of the Rx Descriptor Ring */
541 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
542 	(rdba & 0x00000000ffffffffULL));
543 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
544 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
545 	adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
547 	/* Set up the SRRCTL register */
548 	srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
549 	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
550 	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
552 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
555 	* Set DROP_EN iff we have no flow control and >1 queue.
556 	* Note that srrctl was cleared shortly before during reset,
557 	* so we do not need to clear the bit, but do it just in case
558 	* this code is moved elsewhere.
560 	if (adapter->num_queues > 1 &&
561 	adapter->hw.fc.requested_mode == ixgbe_fc_none) {
562 	srrctl |= IXGBE_SRRCTL_DROP_EN;
564 	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
567 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
569 	/* Setup the HW Rx Head and Tail Descriptor Pointers */
570 	IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
571 	IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
573 	/* Set the driver rx tail address */
574 	rxr->tail = IXGBE_RDT(rxr->me);
/* 82599 and later: select which header types packet-split recognizes. */
577 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
578 	u32 psrtype = IXGBE_PSRTYPE_TCPHDR
579 	| IXGBE_PSRTYPE_UDPHDR
580 	| IXGBE_PSRTYPE_IPV4HDR
581 	| IXGBE_PSRTYPE_IPV6HDR;
582 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
585 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
587 	ixgbe_initialize_rss_mapping(adapter);
589 	if (adapter->num_queues > 1) {
590 	/* RSS and RX IPP Checksum are mutually exclusive */
591 	rxcsum |= IXGBE_RXCSUM_PCSD;
594 	if (ifp->if_capenable & IFCAP_RXCSUM)
595 	rxcsum |= IXGBE_RXCSUM_PCSD;
597 	/* This is useful for calculating UDP/IP fragment checksums */
598 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
599 	rxcsum |= IXGBE_RXCSUM_IPPCSE;
601 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
604 } /* ixgbe_initialize_receive_units */
606 /************************************************************************
607 * ixgbe_initialize_transmit_units - Enable transmit units.
608 ************************************************************************/
/*
 * Program the transmit side: per-queue descriptor ring base/length,
 * TDH/TDT pointers, head-writeback disable via DCA_TXCTRL (TPH_ on X550,
 * per the comment below), and on 82599+ enable the TX DMA engine and set
 * MTQC with the arbiter temporarily disabled.
 * NOTE(review): extraction has dropped lines here (return-type line,
 * "default:"/"break;" lines in both switches, and closing braces) --
 * confirm against upstream.
 */
610 ixgbe_initialize_transmit_units(struct adapter *adapter)
612 	struct tx_ring *txr = adapter->tx_rings;
613 	struct ixgbe_hw *hw = &adapter->hw;
615 	/* Setup the Base and Length of the Tx Descriptor Ring */
616 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
617 	u64 tdba = txr->txdma.dma_paddr;
621 	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
622 	(tdba & 0x00000000ffffffffULL));
623 	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
624 	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
625 	adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
627 	/* Setup the HW Tx Head and Tail descriptor pointers */
628 	IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
629 	IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
631 	/* Cache the tail address */
632 	txr->tail = IXGBE_TDT(j);
634 	/* Disable Head Writeback */
636 	* Note: for X550 series devices, these registers are actually
637 	* prefixed with TPH_ isntead of DCA_, but the addresses and
638 	* fields remain the same.
640 	switch (hw->mac.type) {
641 	case ixgbe_mac_82598EB:
642 	txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
645 	txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
648 	txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
649 	switch (hw->mac.type) {
650 	case ixgbe_mac_82598EB:
651 	IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
654 	IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
660 	if (hw->mac.type != ixgbe_mac_82598EB) {
661 	u32 dmatxctl, rttdcs;
663 	dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
664 	dmatxctl |= IXGBE_DMATXCTL_TE;
665 	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
666 	/* Disable arbiter to set MTQC */
667 	rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
668 	rttdcs |= IXGBE_RTTDCS_ARBDIS;
669 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
670 	IXGBE_WRITE_REG(hw, IXGBE_MTQC,
671 	ixgbe_get_mtqc(adapter->iov_mode));
672 	rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
673 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
677 } /* ixgbe_initialize_transmit_units */
679 /************************************************************************
680 * ixgbe_attach - Device initialization routine
682 * Called when the driver is being loaded.
683 * Identifies the type of hardware, allocates all resources
684 * and initializes the hardware.
686 * return 0 on success, positive on failure
687 ************************************************************************/
/*
 * Device attach: allocates the softc resources, identifies the hardware,
 * initializes the shared code, allocates queues and interrupts, resets
 * and starts the hardware, creates the ifnet, registers VLAN/netmap/
 * sysctl hooks, and spins up the admin taskqueue.  Returns 0 on success.
 * NOTE(review): extraction has dropped many lines from this function
 * (local declarations such as hw/ctrl_ext/error, "goto err_*" statements,
 * the err_late/err_out labels themselves, "} else" lines, and the final
 * "return" statements).  The trailing cleanup sequence below (free
 * structures, drop DRV_LOAD, destroy lock) is the error-unwind path --
 * confirm the label structure against upstream.
 */
689 ixgbe_attach(device_t dev)
691 	struct adapter *adapter;
696 	INIT_DEBUGOUT("ixgbe_attach: begin");
698 	/* Allocate, clear, and link in our adapter structure */
699 	adapter = device_get_softc(dev);
700 	adapter->hw.back = adapter;
705 	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
707 	/* Set up the timer callout */
708 	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
710 	/* Determine hardware revision */
711 	hw->vendor_id = pci_get_vendor(dev);
712 	hw->device_id = pci_get_device(dev);
713 	hw->revision_id = pci_get_revid(dev);
714 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
715 	hw->subsystem_device_id = pci_get_subdevice(dev);
718 	* Make sure BUSMASTER is set
720 	pci_enable_busmaster(dev);
722 	/* Do base PCI setup - map BAR0 */
723 	if (ixgbe_allocate_pci_resources(adapter)) {
724 	device_printf(dev, "Allocation of PCI resources failed\n");
729 	/* let hardware know driver is loaded */
730 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
731 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
732 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
734 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
737 	* Initialize the shared code
739 	if (ixgbe_init_shared_code(hw)) {
740 	device_printf(dev, "Unable to initialize the shared code\n");
/* Set up the mailbox ops if this MAC provides them (VF communication). */
745 	if (hw->mbx.ops.init_params)
746 	hw->mbx.ops.init_params(hw);
749 	/* Pick up the 82599 settings */
750 	if (hw->mac.type != ixgbe_mac_82598EB) {
751 	hw->phy.smart_speed = ixgbe_smart_speed;
752 	adapter->num_segs = IXGBE_82599_SCATTER;
754 	adapter->num_segs = IXGBE_82598_SCATTER;
756 	ixgbe_init_device_features(adapter);
758 	if (ixgbe_configure_interrupts(adapter)) {
763 	/* Allocate multicast array memory. */
764 	adapter->mta = malloc(sizeof(*adapter->mta) *
765 	MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
766 	if (adapter->mta == NULL) {
767 	device_printf(dev, "Can not allocate multicast setup array\n");
772 	/* Enable WoL (if supported) */
773 	ixgbe_check_wol_support(adapter);
775 	/* Register for VLAN events */
776 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
777 	ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
778 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
779 	ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
781 	/* Verify adapter fan is still functional (if applicable) */
782 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
783 	u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
784 	ixgbe_check_fan_failure(adapter, esdp, FALSE);
787 	/* Ensure SW/FW semaphore is free */
788 	ixgbe_init_swfw_semaphore(hw);
790 	/* Enable EEE power saving */
791 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
792 	hw->mac.ops.setup_eee(hw, TRUE);
794 	/* Set an initial default flow control value */
795 	hw->fc.requested_mode = ixgbe_flow_control;
797 	/* Sysctls for limiting the amount of work done in the taskqueues */
798 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
799 	"max number of rx packets to process",
800 	&adapter->rx_process_limit, ixgbe_rx_process_limit);
802 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
803 	"max number of tx packets to process",
804 	&adapter->tx_process_limit, ixgbe_tx_process_limit);
806 	/* Do descriptor calc and sanity checks */
807 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
808 	ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
809 	device_printf(dev, "TXD config issue, using default!\n");
810 	adapter->num_tx_desc = DEFAULT_TXD;
812 	adapter->num_tx_desc = ixgbe_txd;
815 	* With many RX rings it is easy to exceed the
816 	* system mbuf allocation. Tuning nmbclusters
817 	* can alleviate this.
819 	if (nmbclusters > 0) {
821 	s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
822 	if (s > nmbclusters) {
823 	device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
824 	ixgbe_rxd = DEFAULT_RXD;
828 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
829 	ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
830 	device_printf(dev, "RXD config issue, using default!\n");
831 	adapter->num_rx_desc = DEFAULT_RXD;
833 	adapter->num_rx_desc = ixgbe_rxd;
835 	/* Allocate our TX/RX Queues */
836 	if (ixgbe_allocate_queues(adapter)) {
/* Reset with over-temp protection enabled only for the duration. */
841 	hw->phy.reset_if_overtemp = TRUE;
842 	error = ixgbe_reset_hw(hw);
843 	hw->phy.reset_if_overtemp = FALSE;
844 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
846 	* No optics in this port, set up
847 	* so the timer routine will probe
848 	* for later insertion.
850 	adapter->sfp_probe = TRUE;
851 	error = IXGBE_SUCCESS;
852 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
853 	device_printf(dev, "Unsupported SFP+ module detected!\n");
857 	device_printf(dev, "Hardware initialization failed\n");
862 	/* Make sure we have a good EEPROM before we read from it */
863 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
864 	device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
869 	/* Setup OS specific network interface */
870 	if (ixgbe_setup_interface(dev, adapter) != 0)
/* Hook up interrupt handlers: MSI-X per-queue vectors or a legacy one. */
873 	if (adapter->feat_en & IXGBE_FEATURE_MSIX)
874 	error = ixgbe_allocate_msix(adapter);
876 	error = ixgbe_allocate_legacy(adapter);
/* Start the MAC; the cases below report non-fatal start conditions. */
880 	error = ixgbe_start_hw(hw);
882 	case IXGBE_ERR_EEPROM_VERSION:
883 	device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
885 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
886 	device_printf(dev, "Unsupported SFP+ Module\n");
889 	case IXGBE_ERR_SFP_NOT_PRESENT:
890 	device_printf(dev, "No SFP+ Module found\n");
896 	/* Enable the optics for 82599 SFP+ fiber */
897 	ixgbe_enable_tx_laser(hw);
899 	/* Enable power to the phy. */
900 	ixgbe_set_phy_power(hw, TRUE);
902 	/* Initialize statistics */
903 	ixgbe_update_stats_counters(adapter);
905 	/* Check PCIE slot type/speed/width */
906 	ixgbe_get_slot_info(adapter);
909 	* Do time init and sysctl init here, but
910 	* only on the first port of a bypass adapter.
912 	ixgbe_bypass_init(adapter);
914 	/* Set an initial dmac value */
916 	/* Set initial advertised speeds (if applicable) */
917 	adapter->advertise = ixgbe_get_advertise(adapter);
919 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
920 	ixgbe_define_iov_schemas(dev, &error);
923 	ixgbe_add_device_sysctls(adapter);
924 	ixgbe_add_hw_stats(adapter);
927 	adapter->init_locked = ixgbe_init_locked;
928 	adapter->stop_locked = ixgbe_stop;
930 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
931 	ixgbe_netmap_attach(adapter);
933 	/* Initialize Admin Task */
934 	TASK_INIT(&adapter->admin_task, 0, ixgbe_handle_admin_task, adapter);
936 	/* Initialize task queue */
937 	adapter->tq = taskqueue_create_fast("ixgbe_admin", M_NOWAIT,
938 	taskqueue_thread_enqueue, &adapter->tq);
939 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s admintaskq",
940 	device_get_nameunit(adapter->dev));
942 	INIT_DEBUGOUT("ixgbe_attach: end");
/* --- error unwind: undo everything done above, in reverse order --- */
947 	ixgbe_free_transmit_structures(adapter);
948 	ixgbe_free_receive_structures(adapter);
949 	free(adapter->queues, M_DEVBUF);
951 	if (adapter->ifp != NULL)
952 	if_free(adapter->ifp);
/* Tell firmware the driver is no longer loaded before releasing PCI. */
953 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
954 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
955 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
956 	ixgbe_free_pci_resources(adapter);
957 	free(adapter->mta, M_IXGBE);
958 	IXGBE_CORE_LOCK_DESTROY(adapter);
963 /************************************************************************
964 * ixgbe_check_wol_support
966 * Checks whether the adapter's ports are capable of
967 * Wake On LAN by reading the adapter's NVM.
969 * Sets each port's hw->wol_enabled value depending
970 * on the value read here.
971 ************************************************************************/
/*
 * Reads the device-capabilities word from the adapter's NVM and sets
 * adapter->wol_support / hw->wol_enabled for this port, then snapshots
 * the current wake-up filter control (WUFC) register so it can be
 * restored later.
 * NOTE(review): this listing is missing intermediate lines (e.g. the
 * dev_caps declaration and the tail of the port-0 condition).
 */
973 ixgbe_check_wol_support(struct adapter *adapter)
975 struct ixgbe_hw *hw = &adapter->hw;
978 /* Find out WoL support for port */
979 adapter->wol_support = hw->wol_enabled = 0;
980 ixgbe_get_device_caps(hw, &dev_caps);
/* WOL_PORT0_1 = both ports WoL-capable; otherwise WOL_PORT0 applies to port 0 only */
981 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
982 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
984 adapter->wol_support = hw->wol_enabled = 1;
986 /* Save initial wake up filter configuration */
987 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
990 } /* ixgbe_check_wol_support */
992 /************************************************************************
993 * ixgbe_setup_interface
995 * Setup networking device structure and register an interface.
996 ************************************************************************/
/*
 * Allocates and initializes the ifnet structure, wires up the driver's
 * init/ioctl/transmit entry points, selects the legacy (if_start) or
 * multiqueue (if_transmit) TX path, advertises capabilities, attaches
 * the Ethernet layer, and registers supported media types.
 */
998 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
1002 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1004 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1006 device_printf(dev, "can not allocate ifnet structure\n");
1009 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1010 ifp->if_baudrate = IF_Gbps(10);
1011 ifp->if_init = ixgbe_init;
1012 ifp->if_softc = adapter;
1013 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1014 ifp->if_ioctl = ixgbe_ioctl;
1015 #if __FreeBSD_version >= 1100036
1016 if_setgetcounterfn(ifp, ixgbe_get_counter);
1018 #if __FreeBSD_version >= 1100045
1019 /* TSO parameters */
/* 65518 = 65535 minus the Ethernet VLAN header reserved by the stack */
1020 ifp->if_hw_tsomax = 65518;
1021 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1022 ifp->if_hw_tsomaxsegsize = 2048;
/* Legacy single-queue TX uses if_start + ifq; multiqueue uses if_transmit */
1024 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1025 ifp->if_start = ixgbe_legacy_start;
1026 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1027 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
1028 IFQ_SET_READY(&ifp->if_snd);
/* Function pointers shared with the TX/RX code pick the active path */
1029 ixgbe_start_locked = ixgbe_legacy_start_locked;
1030 ixgbe_ring_empty = ixgbe_legacy_ring_empty;
1032 ifp->if_transmit = ixgbe_mq_start;
1033 ifp->if_qflush = ixgbe_qflush;
1034 ixgbe_start_locked = ixgbe_mq_start_locked;
1035 ixgbe_ring_empty = drbr_empty;
1038 ether_ifattach(ifp, adapter->hw.mac.addr);
1040 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1043 * Tell the upper layer(s) we support long frames.
1045 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1047 /* Set capability flags */
1048 ifp->if_capabilities |= IFCAP_HWCSUM
1052 | IFCAP_VLAN_HWTAGGING
1059 /* Enable the above capabilities by default */
1060 ifp->if_capenable = ifp->if_capabilities;
1063 * Don't turn this on by default, if vlans are
1064 * created on another pseudo device (eg. lagg)
1065 * then vlan events are not passed thru, breaking
1066 * operation, but with HW FILTER off it works. If
1067 * using vlans directly on the ixgbe driver you can
1068 * enable this and get full hardware tag filtering.
1070 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1073 * Specify the media types supported by this adapter and register
1074 * callbacks to update media and link information
1076 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1077 ixgbe_media_status);
1079 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1080 ixgbe_add_media_types(adapter);
1082 /* Set autoselect media by default */
1083 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1086 } /* ixgbe_setup_interface */
1088 #if __FreeBSD_version >= 1100036
1089 /************************************************************************
1091 ************************************************************************/
/*
 * if_get_counter callback: returns driver-maintained interface
 * statistics for the given counter id. OQDROPS is computed on demand
 * by summing buf_ring drop counts across all TX rings; anything not
 * handled here falls back to the stack's default accounting.
 */
1093 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
1095 struct adapter *adapter;
1096 struct tx_ring *txr;
1099 adapter = if_getsoftc(ifp);
1102 case IFCOUNTER_IPACKETS:
1103 return (adapter->ipackets);
1104 case IFCOUNTER_OPACKETS:
1105 return (adapter->opackets);
1106 case IFCOUNTER_IBYTES:
1107 return (adapter->ibytes);
1108 case IFCOUNTER_OBYTES:
1109 return (adapter->obytes);
1110 case IFCOUNTER_IMCASTS:
1111 return (adapter->imcasts);
1112 case IFCOUNTER_OMCASTS:
1113 return (adapter->omcasts);
1114 case IFCOUNTER_COLLISIONS:
1116 case IFCOUNTER_IQDROPS:
1117 return (adapter->iqdrops);
1118 case IFCOUNTER_OQDROPS:
/* Sum software drops recorded in each TX ring's buf_ring */
1120 txr = adapter->tx_rings;
1121 for (int i = 0; i < adapter->num_queues; i++, txr++)
1122 rv += txr->br->br_drops;
1124 case IFCOUNTER_IERRORS:
1125 return (adapter->ierrors);
1127 return (if_get_counter_default(ifp, cnt));
1129 } /* ixgbe_get_counter */
1132 /************************************************************************
1133 * ixgbe_add_media_types
1134 ************************************************************************/
/*
 * Translates the PHY-layer capability bits cached in adapter->phy_layer
 * into ifmedia entries. Backplane (KR/KX4/KX) types are added natively
 * when IFM_ETH_XTYPE exists; otherwise they are mapped onto the closest
 * pre-existing media define and the mapping is logged. Always finishes
 * by adding IFM_AUTO.
 */
1136 ixgbe_add_media_types(struct adapter *adapter)
1138 struct ixgbe_hw *hw = &adapter->hw;
1139 device_t dev = adapter->dev;
1142 layer = adapter->phy_layer;
1144 /* Media types with matching FreeBSD media defines */
1145 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1146 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1147 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1148 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1149 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1150 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1151 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1152 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1154 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1155 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1156 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
/* Multispeed fiber modules also get the 1G fallback media type */
1159 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1160 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1161 if (hw->phy.multispeed_fiber)
1162 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1165 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1166 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1167 if (hw->phy.multispeed_fiber)
1168 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1170 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1171 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1172 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1173 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1175 #ifdef IFM_ETH_XTYPE
1176 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1177 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1178 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1179 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1180 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1181 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1182 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1183 ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
/* Older stacks without IFM_ETH_XTYPE: map backplane types to lookalikes */
1185 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1186 device_printf(dev, "Media supported: 10GbaseKR\n");
1187 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1188 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1190 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1191 device_printf(dev, "Media supported: 10GbaseKX4\n");
1192 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1193 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1195 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1196 device_printf(dev, "Media supported: 1000baseKX\n");
1197 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1198 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1200 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1201 device_printf(dev, "Media supported: 2500baseKX\n");
1202 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1203 ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
/* 1000baseBX has no ifmedia define here; report support only */
1206 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1207 device_printf(dev, "Media supported: 1000baseBX\n");
1209 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1210 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1212 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1215 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1216 } /* ixgbe_add_media_types */
1218 /************************************************************************
1220 ************************************************************************/
/*
 * Predicate: does this MAC/PHY combination use a pluggable SFP module?
 * Decided per MAC generation: 82598 only via the NL PHY, 82599 via the
 * reported media type (fiber/QSFP), X550EM_* via fiber media type.
 * NOTE(review): the listing omits the return statements and the
 * 82598EB/default branches' bodies.
 */
1222 ixgbe_is_sfp(struct ixgbe_hw *hw)
1224 switch (hw->mac.type) {
1225 case ixgbe_mac_82598EB:
1226 if (hw->phy.type == ixgbe_phy_nl)
1229 case ixgbe_mac_82599EB:
1230 switch (hw->mac.ops.get_media_type(hw)) {
1231 case ixgbe_media_type_fiber:
1232 case ixgbe_media_type_fiber_qsfp:
1237 case ixgbe_mac_X550EM_x:
1238 case ixgbe_mac_X550EM_a:
1239 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1245 } /* ixgbe_is_sfp */
1247 /************************************************************************
1249 ************************************************************************/
/*
 * Configures the link. For SFP-based ports the real work is deferred to
 * the admin taskqueue (MSF task for multispeed fiber after enabling the
 * laser, MOD task otherwise). For non-SFP ports it checks link state,
 * resolves advertised speeds, and calls the MAC's setup_link op.
 */
1251 ixgbe_config_link(struct adapter *adapter)
1253 struct ixgbe_hw *hw = &adapter->hw;
1254 u32 autoneg, err = 0;
1255 bool sfp, negotiate;
1257 sfp = ixgbe_is_sfp(hw);
1260 if (hw->phy.multispeed_fiber) {
1261 hw->mac.ops.setup_sfp(hw);
/* Laser must be on before the multispeed-fiber task can negotiate */
1262 ixgbe_enable_tx_laser(hw);
1263 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
1264 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
1266 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1267 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
1270 if (hw->mac.ops.check_link)
1271 err = ixgbe_check_link(hw, &adapter->link_speed,
1272 &adapter->link_up, FALSE);
/* If nothing is advertised yet, fall back to the HW's capabilities */
1275 autoneg = hw->phy.autoneg_advertised;
1276 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1277 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1281 if (hw->mac.ops.setup_link)
1282 err = hw->mac.ops.setup_link(hw, autoneg,
1288 } /* ixgbe_config_link */
1290 /************************************************************************
1291 * ixgbe_update_stats_counters - Update board statistics counters.
1292 ************************************************************************/
/*
 * Reads the clear-on-read hardware statistics registers and accumulates
 * them into adapter->stats.pf, applying the documented 82598/82599
 * counter workarounds, then publishes the totals into the OS-visible
 * interface counters via the IXGBE_SET_* macros.
 * NOTE(review): the lines that accumulate missed_rx/total_missed_rx
 * from MPC are absent from this listing — as shown, both stay 0.
 */
1294 ixgbe_update_stats_counters(struct adapter *adapter)
1296 struct ixgbe_hw *hw = &adapter->hw;
1297 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1298 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1299 u64 total_missed_rx = 0;
1301 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1302 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1303 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1304 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1305 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
/* Per-queue RX/TX packet and RX-drop counters, 16 queue registers */
1307 for (int i = 0; i < 16; i++) {
1308 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1309 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1310 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1312 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1313 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1314 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1316 /* Hardware workaround, gprc counts missed packets */
1317 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1318 stats->gprc -= missed_rx;
/* 82599+ exposes 64-bit octet counters split across L/H registers */
1320 if (hw->mac.type != ixgbe_mac_82598EB) {
1321 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1322 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1323 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1324 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1325 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1326 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1327 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1328 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1330 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1331 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1332 /* 82598 only has a counter in the high register */
1333 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1334 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1335 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1339 * Workaround: mprc hardware is incorrectly counting
1340 * broadcasts, so for now we subtract those.
1342 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1343 stats->bprc += bprc;
1344 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1345 if (hw->mac.type == ixgbe_mac_82598EB)
1346 stats->mprc -= bprc;
1348 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1349 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1350 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1351 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1352 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1353 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1355 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1356 stats->lxontxc += lxon;
1357 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1358 stats->lxofftxc += lxoff;
1359 total = lxon + lxoff;
/* TX counters include flow-control frames; subtract them back out */
1361 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1362 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1363 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1364 stats->gptc -= total;
1365 stats->mptc -= total;
1366 stats->ptc64 -= total;
/* Each pause frame is a minimum-size Ethernet frame on the wire */
1367 stats->gotc -= total * ETHER_MIN_LEN;
1369 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1370 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1371 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1372 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1373 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1374 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1375 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1376 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1377 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1378 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1379 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1380 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1381 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1382 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1383 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1384 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1385 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1386 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1387 /* Only read FCOE on 82599 */
1388 if (hw->mac.type != ixgbe_mac_82598EB) {
1389 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1390 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1391 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1392 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1393 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1396 /* Fill out the OS statistics structure */
1397 IXGBE_SET_IPACKETS(adapter, stats->gprc);
1398 IXGBE_SET_OPACKETS(adapter, stats->gptc);
1399 IXGBE_SET_IBYTES(adapter, stats->gorc);
1400 IXGBE_SET_OBYTES(adapter, stats->gotc);
1401 IXGBE_SET_IMCASTS(adapter, stats->mprc);
1402 IXGBE_SET_OMCASTS(adapter, stats->mptc);
1403 IXGBE_SET_COLLISIONS(adapter, 0);
1404 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1405 IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1406 } /* ixgbe_update_stats_counters */
1408 /************************************************************************
1409 * ixgbe_add_hw_stats
1411 * Add sysctl variables, one per statistic, to the system.
1412 ************************************************************************/
/*
 * Registers the driver's sysctl tree: global driver counters, one node
 * per TX queue and per RX queue (descriptor head/tail via PROC handlers
 * plus software counters), and a "mac_stats" node exporting the
 * accumulated hardware statistics from adapter->stats.pf.
 */
1414 ixgbe_add_hw_stats(struct adapter *adapter)
1416 device_t dev = adapter->dev;
1417 struct tx_ring *txr = adapter->tx_rings;
1418 struct rx_ring *rxr = adapter->rx_rings;
1419 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1420 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1421 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1422 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1423 struct sysctl_oid *stat_node, *queue_node;
1424 struct sysctl_oid_list *stat_list, *queue_list;
1426 #define QUEUE_NAME_LEN 32
1427 char namebuf[QUEUE_NAME_LEN];
1429 /* Driver Statistics */
1430 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1431 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1432 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1433 CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
1434 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1435 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1436 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1437 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
/* Per-TX-queue sysctl nodes */
1439 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1440 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1441 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1442 CTLFLAG_RD, NULL, "Queue Name");
1443 queue_list = SYSCTL_CHILDREN(queue_node);
1445 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1446 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
1447 sizeof(&adapter->queues[i]),
1448 ixgbe_sysctl_interrupt_rate_handler, "IU",
1450 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1451 CTLFLAG_RD, &(adapter->queues[i].irqs),
1452 "irqs on this queue");
1453 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1454 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1455 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1456 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1457 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1458 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1459 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1460 CTLFLAG_RD, &txr->tso_tx, "TSO");
1461 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
1462 CTLFLAG_RD, &txr->no_tx_dma_setup,
1463 "Driver tx dma failure in xmit");
1464 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
1465 CTLFLAG_RD, &txr->no_desc_avail,
1466 "Queue No Descriptor Available");
1467 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1468 CTLFLAG_RD, &txr->total_packets,
1469 "Queue Packets Transmitted");
1470 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
1471 CTLFLAG_RD, &txr->br->br_drops,
1472 "Packets dropped in buf_ring");
/* Per-RX-queue sysctl nodes (reuses the same "queue%d" node names) */
1475 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1476 struct lro_ctrl *lro = &rxr->lro;
1478 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1479 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1480 CTLFLAG_RD, NULL, "Queue Name");
1481 queue_list = SYSCTL_CHILDREN(queue_node);
1483 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1484 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1485 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1486 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1487 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1488 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1489 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1490 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1491 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1492 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1493 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1494 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1495 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1496 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1497 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
1498 CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
1499 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
1500 CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
1503 /* MAC stats get their own sub node */
1505 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1506 CTLFLAG_RD, NULL, "MAC Statistics");
1507 stat_list = SYSCTL_CHILDREN(stat_node);
1509 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1510 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1511 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1512 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1513 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1514 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1515 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1516 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1517 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1518 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1519 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1520 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1521 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1522 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1523 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1524 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1526 /* Flow Control stats */
1527 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1528 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1529 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1530 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1531 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1532 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1533 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1534 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1536 /* Packet Reception Stats */
1537 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1538 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1539 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1540 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1541 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1542 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1543 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1544 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1545 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1546 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1547 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1548 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1549 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1550 CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1551 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1552 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1553 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1554 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1555 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1556 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1557 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1558 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1559 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
/* NOTE(review): description says "1023-1522"; range is 1024-1522 */
1560 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1561 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1562 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1563 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1564 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1565 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1566 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1567 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1568 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1569 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1570 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1571 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
/* NOTE(review): "drpd" exports mngptc (mgmt pkts TRANSMITTED); the
 * dropped counter is stats->mngpdc — likely a copy/paste bug, confirm */
1572 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1573 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1574 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1576 /* Packet Transmission Stats */
1577 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1578 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1579 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1580 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1581 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1582 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1583 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1584 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1585 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1586 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1587 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1588 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1589 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1590 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1591 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1592 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1593 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1594 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1595 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1596 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1597 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1598 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1599 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1600 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1601 } /* ixgbe_add_hw_stats */
1603 /************************************************************************
1604 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1606 * Retrieves the TDH value from the hardware
1607 ************************************************************************/
/*
 * Sysctl handler: reads the hardware Transmit Descriptor Head (TDH)
 * register for the TX ring passed in oid_arg1 and reports it as an int.
 * Read-only in effect: the handler returns before any new value is used.
 */
1609 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1611 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1618 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1619 error = sysctl_handle_int(oidp, &val, 0, req);
1620 if (error || !req->newptr)
1624 } /* ixgbe_sysctl_tdh_handler */
1626 /************************************************************************
1627 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1629 * Retrieves the TDT value from the hardware
1630 ************************************************************************/
/*
 * Sysctl handler: reads the hardware Transmit Descriptor Tail (TDT)
 * register for the TX ring passed in oid_arg1 and reports it as an int.
 */
1632 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1634 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1641 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1642 error = sysctl_handle_int(oidp, &val, 0, req);
1643 if (error || !req->newptr)
1647 } /* ixgbe_sysctl_tdt_handler */
1649 /************************************************************************
1650 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1652 * Retrieves the RDH value from the hardware
1653 ************************************************************************/
/*
 * Sysctl handler: reads the hardware Receive Descriptor Head (RDH)
 * register for the RX ring passed in oid_arg1 and reports it as an int.
 */
1655 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1657 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1664 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1665 error = sysctl_handle_int(oidp, &val, 0, req);
1666 if (error || !req->newptr)
1670 } /* ixgbe_sysctl_rdh_handler */
1672 /************************************************************************
1673 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1675 * Retrieves the RDT value from the hardware
1676 ************************************************************************/
/*
 * Sysctl handler: reads the hardware Receive Descriptor Tail (RDT)
 * register for the RX ring passed in oid_arg1 and reports it as an int.
 */
1678 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1680 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1687 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1688 error = sysctl_handle_int(oidp, &val, 0, req);
1689 if (error || !req->newptr)
1693 } /* ixgbe_sysctl_rdt_handler */
1695 /************************************************************************
1696 * ixgbe_register_vlan
1698 * Run via vlan config EVENT, it enables us to use the
1699 * HW Filter table since we can get the vlan id. This
1700 * just creates the entry in the soft version of the
1701 * VFTA, init will repopulate the real table.
1702 ************************************************************************/
/*
 * VLAN-config event handler: records vtag in the software shadow VFTA
 * (index = bits 5-11 of the tag; the per-word bit line is not visible
 * in this listing), bumps num_vlans, and re-runs VLAN HW setup, all
 * under the core lock. Ignores events for other interfaces and
 * out-of-range tags.
 */
1704 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1706 struct adapter *adapter = ifp->if_softc;
1709 if (ifp->if_softc != arg) /* Not our event */
1712 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1715 IXGBE_CORE_LOCK(adapter);
1716 index = (vtag >> 5) & 0x7F;
1718 adapter->shadow_vfta[index] |= (1 << bit);
1719 ++adapter->num_vlans;
/* Push the updated shadow table into the hardware */
1720 ixgbe_setup_vlan_hw_support(adapter);
1721 IXGBE_CORE_UNLOCK(adapter);
1722 } /* ixgbe_register_vlan */
1724 /************************************************************************
1725 * ixgbe_unregister_vlan
1727 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1728 ************************************************************************/
/*
 * VLAN-unconfig event handler: clears vtag's bit from the software
 * shadow VFTA, decrements num_vlans, and re-runs VLAN HW setup under
 * the core lock. Mirrors ixgbe_register_vlan.
 */
1730 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1732 struct adapter *adapter = ifp->if_softc;
1735 if (ifp->if_softc != arg)
1738 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1741 IXGBE_CORE_LOCK(adapter);
1742 index = (vtag >> 5) & 0x7F;
1744 adapter->shadow_vfta[index] &= ~(1 << bit);
1745 --adapter->num_vlans;
1746 /* Re-init to load the changes */
1747 ixgbe_setup_vlan_hw_support(adapter);
1748 IXGBE_CORE_UNLOCK(adapter);
1749 } /* ixgbe_unregister_vlan */
1751 /************************************************************************
1752 * ixgbe_setup_vlan_hw_support
1753 ************************************************************************/
/*
 * Programs hardware VLAN support after a (soft) reset: enables per-queue
 * VLAN tag stripping (RXDCTL.VME on 82599+), repopulates the hardware
 * VFTA from the software shadow copy, and configures VLNCTRL filtering
 * according to the interface's IFCAP_VLAN_HWFILTER setting. Does nothing
 * if no VLANs are registered.
 */
1755 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
1757 struct ifnet *ifp = adapter->ifp;
1758 struct ixgbe_hw *hw = &adapter->hw;
1759 struct rx_ring *rxr;
1765 * We get here thru init_locked, meaning
1766 * a soft reset, this has already cleared
1767 * the VFTA and other state, so if there
1768 * have been no vlan's registered do nothing.
1770 if (adapter->num_vlans == 0)
1773 /* Setup the queues for vlans */
1774 for (i = 0; i < adapter->num_queues; i++) {
1775 rxr = &adapter->rx_rings[i];
1776 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1777 if (hw->mac.type != ixgbe_mac_82598EB) {
1778 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1779 ctrl |= IXGBE_RXDCTL_VME;
1780 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
/* Tell the RX path that the hardware strips the tag */
1782 rxr->vtag_strip = TRUE;
1785 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1788 * A soft reset zero's out the VFTA, so
1789 * we need to repopulate it now.
1791 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1792 if (adapter->shadow_vfta[i] != 0)
1793 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1794 adapter->shadow_vfta[i]);
1796 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1797 /* Enable the Filter Table if enabled */
1798 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1799 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1800 ctrl |= IXGBE_VLNCTRL_VFE;
/* On 82598 the global VME bit lives in VLNCTRL, not RXDCTL */
1802 if (hw->mac.type == ixgbe_mac_82598EB)
1803 ctrl |= IXGBE_VLNCTRL_VME;
1804 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1805 } /* ixgbe_setup_vlan_hw_support */
1807 /************************************************************************
1808 * ixgbe_get_slot_info
1810 * Get the width and transaction speed of
1811 * the slot this adapter is plugged into.
1812 ************************************************************************/
/*
 * Determines and reports the PCIe speed/width of the slot the adapter
 * occupies. Quad-port 82599 SFP/QSFP devices sit behind an internal
 * bridge, so for those we walk two levels up the PCI device tree and
 * read the upstream slot's Link Status register directly; otherwise
 * the shared-code ixgbe_get_bus_info() is used. Warns when the slot
 * provides less bandwidth than the adapter can use.
 */
1814 ixgbe_get_slot_info(struct adapter *adapter)
1816 device_t dev = adapter->dev;
1817 struct ixgbe_hw *hw = &adapter->hw;
1820 int bus_info_valid = TRUE;
1822 /* Some devices are behind an internal bridge */
1823 switch (hw->device_id) {
1824 case IXGBE_DEV_ID_82599_SFP_SF_QP:
1825 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1826 goto get_parent_info;
1831 ixgbe_get_bus_info(hw);
1834 * Some devices don't use PCI-E, but there is no need
1835 * to display "Unknown" for bus speed and width.
1837 switch (hw->mac.type) {
1838 case ixgbe_mac_X550EM_x:
1839 case ixgbe_mac_X550EM_a:
1847 * For the Quad port adapter we need to parse back
1848 * up the PCI tree to find the speed of the expansion
1849 * slot into which this adapter is plugged. A bit more work.
/* First hop: the internal bridge above the function */
1851 dev = device_get_parent(device_get_parent(dev));
1853 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1854 pci_get_slot(dev), pci_get_function(dev));
/* Second hop: the physical slot's bridge */
1856 dev = device_get_parent(device_get_parent(dev));
1858 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1859 pci_get_slot(dev), pci_get_function(dev));
1861 /* Now get the PCI Express Capabilities offset */
1862 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1864 * Hmm...can't get PCI-Express capabilities.
1865 * Falling back to default method.
1867 bus_info_valid = FALSE;
1868 ixgbe_get_bus_info(hw);
1871 /* ...and read the Link Status Register */
1872 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1873 ixgbe_set_pci_config_data_generic(hw, link);
1876 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1877 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
1878 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
1879 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
1881 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1882 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1883 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
/* Warn if the slot can't feed the adapter at full rate */
1886 if (bus_info_valid) {
1887 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1888 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1889 (hw->bus.speed == ixgbe_bus_speed_2500))) {
1890 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
1891 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1893 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1894 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1895 (hw->bus.speed < ixgbe_bus_speed_8000))) {
1896 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
1897 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1900 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1903 } /* ixgbe_get_slot_info */
1905 /************************************************************************
1906 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
1907 ************************************************************************/
1909 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1911 struct ixgbe_hw *hw = &adapter->hw;
1912 u64 queue = (u64)(1 << vector);
1915 if (hw->mac.type == ixgbe_mac_82598EB) {
1916 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1917 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1919 mask = (queue & 0xFFFFFFFF);
1921 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1922 mask = (queue >> 32);
1924 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1926 } /* ixgbe_enable_queue */
1928 /************************************************************************
1929 * ixgbe_disable_queue
1930 ************************************************************************/
1932 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1934 struct ixgbe_hw *hw = &adapter->hw;
1935 u64 queue = (u64)(1 << vector);
1938 if (hw->mac.type == ixgbe_mac_82598EB) {
1939 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1940 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1942 mask = (queue & 0xFFFFFFFF);
1944 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1945 mask = (queue >> 32);
1947 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1949 } /* ixgbe_disable_queue */
1951 /************************************************************************
1952 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
1953 ************************************************************************/
1955 ixgbe_msix_que(void *arg)
1957 struct ix_queue *que = arg;
1958 struct adapter *adapter = que->adapter;
1959 struct ifnet *ifp = adapter->ifp;
1960 struct tx_ring *txr = que->txr;
1961 struct rx_ring *rxr = que->rxr;
1966 /* Protect against spurious interrupts */
1967 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1970 ixgbe_disable_queue(adapter, que->msix);
1973 more = ixgbe_rxeof(que);
1977 if (!ixgbe_ring_empty(ifp, txr->br))
1978 ixgbe_start_locked(ifp, txr);
1979 IXGBE_TX_UNLOCK(txr);
1983 if (adapter->enable_aim == FALSE)
1986 * Do Adaptive Interrupt Moderation:
1987 * - Write out last calculated setting
1988 * - Calculate based on average size over
1989 * the last interval.
1991 if (que->eitr_setting)
1992 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
1995 que->eitr_setting = 0;
1997 /* Idle, do nothing */
1998 if ((txr->bytes == 0) && (rxr->bytes == 0))
2001 if ((txr->bytes) && (txr->packets))
2002 newitr = txr->bytes/txr->packets;
2003 if ((rxr->bytes) && (rxr->packets))
2004 newitr = max(newitr, (rxr->bytes / rxr->packets));
2005 newitr += 24; /* account for hardware frame, crc */
2007 /* set an upper boundary */
2008 newitr = min(newitr, 3000);
2010 /* Be nice to the mid range */
2011 if ((newitr > 300) && (newitr < 1200))
2012 newitr = (newitr / 3);
2014 newitr = (newitr / 2);
2016 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2017 newitr |= newitr << 16;
2019 newitr |= IXGBE_EITR_CNT_WDIS;
2021 /* save for next interrupt */
2022 que->eitr_setting = newitr;
2032 taskqueue_enqueue(que->tq, &que->que_task);
2034 ixgbe_enable_queue(adapter, que->msix);
2037 } /* ixgbe_msix_que */
2039 /************************************************************************
2040 * ixgbe_media_status - Media Ioctl callback
2042 * Called whenever the user queries the status of
2043 * the interface using ifconfig.
2044 ************************************************************************/
2046 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2048 struct adapter *adapter = ifp->if_softc;
2049 struct ixgbe_hw *hw = &adapter->hw;
2052 INIT_DEBUGOUT("ixgbe_media_status: begin");
2053 IXGBE_CORE_LOCK(adapter);
2054 ixgbe_update_link_status(adapter);
2056 ifmr->ifm_status = IFM_AVALID;
2057 ifmr->ifm_active = IFM_ETHER;
2059 if (!adapter->link_active) {
2060 IXGBE_CORE_UNLOCK(adapter);
2064 ifmr->ifm_status |= IFM_ACTIVE;
2065 layer = adapter->phy_layer;
2067 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2068 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2069 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2070 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2071 switch (adapter->link_speed) {
2072 case IXGBE_LINK_SPEED_10GB_FULL:
2073 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2075 case IXGBE_LINK_SPEED_1GB_FULL:
2076 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2078 case IXGBE_LINK_SPEED_100_FULL:
2079 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2081 case IXGBE_LINK_SPEED_10_FULL:
2082 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2085 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2086 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2087 switch (adapter->link_speed) {
2088 case IXGBE_LINK_SPEED_10GB_FULL:
2089 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2092 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2093 switch (adapter->link_speed) {
2094 case IXGBE_LINK_SPEED_10GB_FULL:
2095 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2097 case IXGBE_LINK_SPEED_1GB_FULL:
2098 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2101 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2102 switch (adapter->link_speed) {
2103 case IXGBE_LINK_SPEED_10GB_FULL:
2104 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2106 case IXGBE_LINK_SPEED_1GB_FULL:
2107 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2110 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2111 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2112 switch (adapter->link_speed) {
2113 case IXGBE_LINK_SPEED_10GB_FULL:
2114 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2116 case IXGBE_LINK_SPEED_1GB_FULL:
2117 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2120 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2121 switch (adapter->link_speed) {
2122 case IXGBE_LINK_SPEED_10GB_FULL:
2123 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2127 * XXX: These need to use the proper media types once
2130 #ifndef IFM_ETH_XTYPE
2131 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2132 switch (adapter->link_speed) {
2133 case IXGBE_LINK_SPEED_10GB_FULL:
2134 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2136 case IXGBE_LINK_SPEED_2_5GB_FULL:
2137 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2139 case IXGBE_LINK_SPEED_1GB_FULL:
2140 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2143 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2144 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2145 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2146 switch (adapter->link_speed) {
2147 case IXGBE_LINK_SPEED_10GB_FULL:
2148 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2150 case IXGBE_LINK_SPEED_2_5GB_FULL:
2151 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2153 case IXGBE_LINK_SPEED_1GB_FULL:
2154 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2158 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2159 switch (adapter->link_speed) {
2160 case IXGBE_LINK_SPEED_10GB_FULL:
2161 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2163 case IXGBE_LINK_SPEED_2_5GB_FULL:
2164 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2166 case IXGBE_LINK_SPEED_1GB_FULL:
2167 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2170 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2171 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2172 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2173 switch (adapter->link_speed) {
2174 case IXGBE_LINK_SPEED_10GB_FULL:
2175 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2177 case IXGBE_LINK_SPEED_2_5GB_FULL:
2178 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2180 case IXGBE_LINK_SPEED_1GB_FULL:
2181 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2186 /* If nothing is recognized... */
2187 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2188 ifmr->ifm_active |= IFM_UNKNOWN;
2190 #if __FreeBSD_version >= 900025
2191 /* Display current flow control setting used on link */
2192 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2193 hw->fc.current_mode == ixgbe_fc_full)
2194 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2195 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2196 hw->fc.current_mode == ixgbe_fc_full)
2197 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2200 IXGBE_CORE_UNLOCK(adapter);
2203 } /* ixgbe_media_status */
2205 /************************************************************************
2206 * ixgbe_media_change - Media Ioctl callback
2208 * Called when the user changes speed/duplex using
2209 * media/mediopt option with ifconfig.
2210 ************************************************************************/
2212 ixgbe_media_change(struct ifnet *ifp)
2214 struct adapter *adapter = ifp->if_softc;
2215 struct ifmedia *ifm = &adapter->media;
2216 struct ixgbe_hw *hw = &adapter->hw;
2217 ixgbe_link_speed speed = 0;
2219 INIT_DEBUGOUT("ixgbe_media_change: begin");
2221 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2224 if (hw->phy.media_type == ixgbe_media_type_backplane)
2228 * We don't actually need to check against the supported
2229 * media types of the adapter; ifmedia will take care of
2232 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2235 speed |= IXGBE_LINK_SPEED_100_FULL;
2236 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2237 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2241 #ifndef IFM_ETH_XTYPE
2242 case IFM_10G_SR: /* KR, too */
2243 case IFM_10G_CX4: /* KX4 */
2248 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2249 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2251 #ifndef IFM_ETH_XTYPE
2252 case IFM_1000_CX: /* KX */
2258 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2261 speed |= IXGBE_LINK_SPEED_100_FULL;
2262 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2264 case IFM_10G_TWINAX:
2265 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2268 speed |= IXGBE_LINK_SPEED_100_FULL;
2271 speed |= IXGBE_LINK_SPEED_10_FULL;
2277 hw->mac.autotry_restart = TRUE;
2278 hw->mac.ops.setup_link(hw, speed, TRUE);
2279 adapter->advertise =
2280 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2281 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
2282 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
2283 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
2288 device_printf(adapter->dev, "Invalid media type!\n");
2291 } /* ixgbe_media_change */
2293 /************************************************************************
2295 ************************************************************************/
2297 ixgbe_set_promisc(struct adapter *adapter)
2299 struct ifnet *ifp = adapter->ifp;
2303 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2304 rctl &= (~IXGBE_FCTRL_UPE);
2305 if (ifp->if_flags & IFF_ALLMULTI)
2306 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2308 struct ifmultiaddr *ifma;
2309 #if __FreeBSD_version < 800000
2312 if_maddr_rlock(ifp);
2314 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2315 if (ifma->ifma_addr->sa_family != AF_LINK)
2317 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2321 #if __FreeBSD_version < 800000
2322 IF_ADDR_UNLOCK(ifp);
2324 if_maddr_runlock(ifp);
2327 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2328 rctl &= (~IXGBE_FCTRL_MPE);
2329 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2331 if (ifp->if_flags & IFF_PROMISC) {
2332 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2333 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2334 } else if (ifp->if_flags & IFF_ALLMULTI) {
2335 rctl |= IXGBE_FCTRL_MPE;
2336 rctl &= ~IXGBE_FCTRL_UPE;
2337 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2339 } /* ixgbe_set_promisc */
2341 /************************************************************************
2342 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2343 ************************************************************************/
2345 ixgbe_msix_link(void *arg)
2347 struct adapter *adapter = arg;
2348 struct ixgbe_hw *hw = &adapter->hw;
2349 u32 eicr, eicr_mask;
2352 ++adapter->link_irq;
2354 /* Pause other interrupts */
2355 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2357 /* First get the cause */
2358 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2359 /* Be sure the queue bits are not cleared */
2360 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2361 /* Clear interrupt with write */
2362 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2364 /* Link status change */
2365 if (eicr & IXGBE_EICR_LSC) {
2366 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2367 adapter->task_requests |= IXGBE_REQUEST_TASK_LINK;
2368 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
2371 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2372 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2373 (eicr & IXGBE_EICR_FLOW_DIR)) {
2374 /* This is probably overkill :) */
2375 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2377 /* Disable the interrupt */
2378 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2379 adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2380 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
2383 if (eicr & IXGBE_EICR_ECC) {
2384 device_printf(adapter->dev,
2385 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
2386 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2389 /* Check for over temp condition */
2390 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2391 switch (adapter->hw.mac.type) {
2392 case ixgbe_mac_X550EM_a:
2393 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2395 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2396 IXGBE_EICR_GPI_SDP0_X550EM_a);
2397 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2398 IXGBE_EICR_GPI_SDP0_X550EM_a);
2399 retval = hw->phy.ops.check_overtemp(hw);
2400 if (retval != IXGBE_ERR_OVERTEMP)
2402 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2403 device_printf(adapter->dev, "System shutdown required!\n");
2406 if (!(eicr & IXGBE_EICR_TS))
2408 retval = hw->phy.ops.check_overtemp(hw);
2409 if (retval != IXGBE_ERR_OVERTEMP)
2411 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2412 device_printf(adapter->dev, "System shutdown required!\n");
2413 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2418 /* Check for VF message */
2419 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2420 (eicr & IXGBE_EICR_MAILBOX)) {
2421 adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2422 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
2426 if (ixgbe_is_sfp(hw)) {
2427 /* Pluggable optics-related interrupt */
2428 if (hw->mac.type >= ixgbe_mac_X540)
2429 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2431 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2433 if (eicr & eicr_mask) {
2434 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2435 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2436 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
2439 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2440 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2441 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2442 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2443 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2444 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
2448 /* Check for fan failure */
2449 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2450 ixgbe_check_fan_failure(adapter, eicr, TRUE);
2451 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2454 /* External PHY interrupt */
2455 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2456 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2457 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2458 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2459 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
2461 } /* ixgbe_msix_link */
2463 /************************************************************************
2464 * ixgbe_sysctl_interrupt_rate_handler
2465 ************************************************************************/
2467 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2469 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
2471 unsigned int reg, usec, rate;
2473 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2474 usec = ((reg & 0x0FF8) >> 3);
2476 rate = 500000 / usec;
2479 error = sysctl_handle_int(oidp, &rate, 0, req);
2480 if (error || !req->newptr)
2482 reg &= ~0xfff; /* default, no limitation */
2483 ixgbe_max_interrupt_rate = 0;
2484 if (rate > 0 && rate < 500000) {
2487 ixgbe_max_interrupt_rate = rate;
2488 reg |= ((4000000/rate) & 0xff8);
2490 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2493 } /* ixgbe_sysctl_interrupt_rate_handler */
2495 /************************************************************************
2496 * ixgbe_add_device_sysctls
2497 ************************************************************************/
2499 ixgbe_add_device_sysctls(struct adapter *adapter)
2501 device_t dev = adapter->dev;
2502 struct ixgbe_hw *hw = &adapter->hw;
2503 struct sysctl_oid_list *child;
2504 struct sysctl_ctx_list *ctx;
2506 ctx = device_get_sysctl_ctx(dev);
2507 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2509 /* Sysctls for all devices */
2510 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
2511 adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
2513 adapter->enable_aim = ixgbe_enable_aim;
2514 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2515 &adapter->enable_aim, 1, "Interrupt Moderation");
2517 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
2518 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2519 IXGBE_SYSCTL_DESC_ADV_SPEED);
2522 /* testing sysctls (for all devices) */
2523 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
2524 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2525 "I", "PCI Power State");
2527 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
2528 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2529 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2531 /* for X550 series devices */
2532 if (hw->mac.type >= ixgbe_mac_X550)
2533 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
2534 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2535 "I", "DMA Coalesce");
2537 /* for WoL-capable devices */
2538 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2539 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
2540 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2541 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2543 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
2544 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2545 "I", "Enable/Disable Wake Up Filters");
2548 /* for X552/X557-AT devices */
2549 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2550 struct sysctl_oid *phy_node;
2551 struct sysctl_oid_list *phy_list;
2553 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
2554 CTLFLAG_RD, NULL, "External PHY sysctls");
2555 phy_list = SYSCTL_CHILDREN(phy_node);
2557 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
2558 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2559 "I", "Current External PHY Temperature (Celsius)");
2561 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
2562 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
2563 ixgbe_sysctl_phy_overtemp_occurred, "I",
2564 "External PHY High Temperature Event Occurred");
2567 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2568 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
2569 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2570 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2572 } /* ixgbe_add_device_sysctls */
2574 /************************************************************************
2575 * ixgbe_allocate_pci_resources
2576 ************************************************************************/
2578 ixgbe_allocate_pci_resources(struct adapter *adapter)
2580 device_t dev = adapter->dev;
2584 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2587 if (!(adapter->pci_mem)) {
2588 device_printf(dev, "Unable to allocate bus resource: memory\n");
2592 /* Save bus_space values for READ/WRITE_REG macros */
2593 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2594 adapter->osdep.mem_bus_space_handle =
2595 rman_get_bushandle(adapter->pci_mem);
2596 /* Set hw values for shared code */
2597 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2600 } /* ixgbe_allocate_pci_resources */
2602 /************************************************************************
2603 * ixgbe_detach - Device removal routine
2605 * Called when the driver is being removed.
2606 * Stops the adapter and deallocates all the resources
2607 * that were allocated for driver operation.
2609 * return 0 on success, positive on failure
2610 ************************************************************************/
2612 ixgbe_detach(device_t dev)
2614 struct adapter *adapter = device_get_softc(dev);
2615 struct ix_queue *que = adapter->queues;
2616 struct tx_ring *txr = adapter->tx_rings;
2619 INIT_DEBUGOUT("ixgbe_detach: begin");
2621 /* Make sure VLANS are not using driver */
2622 if (adapter->ifp->if_vlantrunk != NULL) {
2623 device_printf(dev, "Vlan in use, detach first\n");
2627 if (ixgbe_pci_iov_detach(dev) != 0) {
2628 device_printf(dev, "SR-IOV in use; detach first.\n");
2632 ether_ifdetach(adapter->ifp);
2633 /* Stop the adapter */
2634 IXGBE_CORE_LOCK(adapter);
2635 ixgbe_setup_low_power_mode(adapter);
2636 IXGBE_CORE_UNLOCK(adapter);
2638 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
2640 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2641 taskqueue_drain(que->tq, &txr->txq_task);
2642 taskqueue_drain(que->tq, &que->que_task);
2643 taskqueue_free(que->tq);
2647 /* let hardware know driver is unloading */
2648 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2649 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2650 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2652 /* Unregister VLAN events */
2653 if (adapter->vlan_attach != NULL)
2654 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
2655 if (adapter->vlan_detach != NULL)
2656 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
2658 callout_drain(&adapter->timer);
2660 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
2661 netmap_detach(adapter->ifp);
2663 /* Drain the Admin Task queue */
2665 taskqueue_drain(adapter->tq, &adapter->admin_task);
2666 taskqueue_free(adapter->tq);
2669 ixgbe_free_pci_resources(adapter);
2670 bus_generic_detach(dev);
2671 if_free(adapter->ifp);
2673 ixgbe_free_transmit_structures(adapter);
2674 ixgbe_free_receive_structures(adapter);
2675 free(adapter->queues, M_DEVBUF);
2676 free(adapter->mta, M_IXGBE);
2678 IXGBE_CORE_LOCK_DESTROY(adapter);
2681 } /* ixgbe_detach */
2683 /************************************************************************
2684 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2686 * Prepare the adapter/port for LPLU and/or WoL
2687 ************************************************************************/
2689 ixgbe_setup_low_power_mode(struct adapter *adapter)
2691 struct ixgbe_hw *hw = &adapter->hw;
2692 device_t dev = adapter->dev;
2695 mtx_assert(&adapter->core_mtx, MA_OWNED);
2697 /* Limit power management flow to X550EM baseT */
2698 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2699 hw->phy.ops.enter_lplu) {
2700 /* Turn off support for APM wakeup. (Using ACPI instead) */
2701 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2702 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2705 * Clear Wake Up Status register to prevent any previous wakeup
2706 * events from waking us up immediately after we suspend.
2708 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2711 * Program the Wakeup Filter Control register with user filter
2714 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2716 /* Enable wakeups and power management in Wakeup Control */
2717 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2718 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2720 /* X550EM baseT adapters need a special LPLU flow */
2721 hw->phy.reset_disable = true;
2722 ixgbe_stop(adapter);
2723 error = hw->phy.ops.enter_lplu(hw);
2725 device_printf(dev, "Error entering LPLU: %d\n", error);
2726 hw->phy.reset_disable = false;
2728 /* Just stop for other adapters */
2729 ixgbe_stop(adapter);
2733 } /* ixgbe_setup_low_power_mode */
2735 /************************************************************************
2736 * ixgbe_shutdown - Shutdown entry point
2737 ************************************************************************/
2739 ixgbe_shutdown(device_t dev)
2741 struct adapter *adapter = device_get_softc(dev);
2744 INIT_DEBUGOUT("ixgbe_shutdown: begin");
2746 IXGBE_CORE_LOCK(adapter);
2747 error = ixgbe_setup_low_power_mode(adapter);
2748 IXGBE_CORE_UNLOCK(adapter);
2751 } /* ixgbe_shutdown */
2753 /************************************************************************
2757 ************************************************************************/
2759 ixgbe_suspend(device_t dev)
2761 struct adapter *adapter = device_get_softc(dev);
2764 INIT_DEBUGOUT("ixgbe_suspend: begin");
2766 IXGBE_CORE_LOCK(adapter);
2768 error = ixgbe_setup_low_power_mode(adapter);
2770 IXGBE_CORE_UNLOCK(adapter);
2773 } /* ixgbe_suspend */
2775 /************************************************************************
2779 ************************************************************************/
2781 ixgbe_resume(device_t dev)
2783 struct adapter *adapter = device_get_softc(dev);
2784 struct ifnet *ifp = adapter->ifp;
2785 struct ixgbe_hw *hw = &adapter->hw;
2788 INIT_DEBUGOUT("ixgbe_resume: begin");
2790 IXGBE_CORE_LOCK(adapter);
2792 /* Read & clear WUS register */
2793 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2795 device_printf(dev, "Woken up by (WUS): %#010x\n",
2796 IXGBE_READ_REG(hw, IXGBE_WUS));
2797 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2798 /* And clear WUFC until next low-power transition */
2799 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2802 * Required after D3->D0 transition;
2803 * will re-advertise all previous advertised speeds
2805 if (ifp->if_flags & IFF_UP)
2806 ixgbe_init_locked(adapter);
2808 IXGBE_CORE_UNLOCK(adapter);
2811 } /* ixgbe_resume */
2813 /************************************************************************
2814 * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
2816 * Takes the ifnet's if_capenable flags (e.g. set by the user using
2817 * ifconfig) and indicates to the OS via the ifnet's if_hwassist
2818 * field what mbuf offload flags the driver will understand.
2819 ************************************************************************/
2821 ixgbe_set_if_hwassist(struct adapter *adapter)
2823 struct ifnet *ifp = adapter->ifp;
2825 ifp->if_hwassist = 0;
2826 #if __FreeBSD_version >= 1000000
2827 if (ifp->if_capenable & IFCAP_TSO4)
2828 ifp->if_hwassist |= CSUM_IP_TSO;
2829 if (ifp->if_capenable & IFCAP_TSO6)
2830 ifp->if_hwassist |= CSUM_IP6_TSO;
2831 if (ifp->if_capenable & IFCAP_TXCSUM) {
2832 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
2833 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2834 ifp->if_hwassist |= CSUM_IP_SCTP;
2836 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
2837 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
2838 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2839 ifp->if_hwassist |= CSUM_IP6_SCTP;
2842 if (ifp->if_capenable & IFCAP_TSO)
2843 ifp->if_hwassist |= CSUM_TSO;
2844 if (ifp->if_capenable & IFCAP_TXCSUM) {
2845 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
2846 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2847 ifp->if_hwassist |= CSUM_SCTP;
2850 } /* ixgbe_set_if_hwassist */
2852 /************************************************************************
2853 * ixgbe_init_locked - Init entry point
2855 * Used in two ways: It is used by the stack as an init
2856 * entry point in network interface structure. It is also
2857 * used by the driver as a hw/sw initialization routine to
2858 * get to a consistent state.
2860 * return 0 on success, positive on failure
2861 ************************************************************************/
2863 ixgbe_init_locked(struct adapter *adapter)
2865 struct ifnet *ifp = adapter->ifp;
2866 device_t dev = adapter->dev;
2867 struct ixgbe_hw *hw = &adapter->hw;
2868 struct tx_ring *txr;
2869 struct rx_ring *rxr;
2875 mtx_assert(&adapter->core_mtx, MA_OWNED);
2876 INIT_DEBUGOUT("ixgbe_init_locked: begin");
2878 hw->adapter_stopped = FALSE;
2879 ixgbe_stop_adapter(hw);
2880 callout_stop(&adapter->timer);
2882 /* Queue indices may change with IOV mode */
2883 ixgbe_align_all_queue_indices(adapter);
2885 /* reprogram the RAR[0] in case user changed it. */
2886 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2888 /* Get the latest mac address, User can use a LAA */
2889 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2890 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2891 hw->addr_ctrl.rar_used_count = 1;
2893 /* Set hardware offload abilities from ifnet flags */
2894 ixgbe_set_if_hwassist(adapter);
2896 /* Prepare transmit descriptors and buffers */
2897 if (ixgbe_setup_transmit_structures(adapter)) {
2898 device_printf(dev, "Could not setup transmit structures\n");
2899 ixgbe_stop(adapter);
2904 ixgbe_initialize_iov(adapter);
2905 ixgbe_initialize_transmit_units(adapter);
2907 /* Setup Multicast table */
2908 ixgbe_set_multi(adapter);
2910 /* Determine the correct mbuf pool, based on frame size */
2911 if (adapter->max_frame_size <= MCLBYTES)
2912 adapter->rx_mbuf_sz = MCLBYTES;
2914 adapter->rx_mbuf_sz = MJUMPAGESIZE;
2916 /* Prepare receive descriptors and buffers */
2917 if (ixgbe_setup_receive_structures(adapter)) {
2918 device_printf(dev, "Could not setup receive structures\n");
2919 ixgbe_stop(adapter);
2923 /* Configure RX settings */
2924 ixgbe_initialize_receive_units(adapter);
2926 /* Initialize variable holding task enqueue requests
2927 * generated by interrupt handlers */
2928 adapter->task_requests = 0;
2930 /* Enable SDP & MSI-X interrupts based on adapter */
2931 ixgbe_config_gpie(adapter);
2934 if (ifp->if_mtu > ETHERMTU) {
2935 /* aka IXGBE_MAXFRS on 82599 and newer */
2936 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2937 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2938 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2939 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2942 /* Now enable all the queues */
2943 for (int i = 0; i < adapter->num_queues; i++) {
2944 txr = &adapter->tx_rings[i];
2945 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2946 txdctl |= IXGBE_TXDCTL_ENABLE;
2947 /* Set WTHRESH to 8, burst writeback */
2948 txdctl |= (8 << 16);
2950 * When the internal queue falls below PTHRESH (32),
2951 * start prefetching as long as there are at least
2952 * HTHRESH (1) buffers ready. The values are taken
2953 * from the Intel linux driver 3.8.21.
2954 * Prefetching enables tx line rate even with 1 queue.
2956 txdctl |= (32 << 0) | (1 << 8);
2957 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2960 for (int i = 0, j = 0; i < adapter->num_queues; i++) {
2961 rxr = &adapter->rx_rings[i];
2962 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2963 if (hw->mac.type == ixgbe_mac_82598EB) {
2969 rxdctl &= ~0x3FFFFF;
2972 rxdctl |= IXGBE_RXDCTL_ENABLE;
2973 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2974 for (; j < 10; j++) {
2975 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2976 IXGBE_RXDCTL_ENABLE)
2984 * In netmap mode, we must preserve the buffers made
2985 * available to userspace before the if_init()
2986 * (this is true by default on the TX side, because
2987 * init makes all buffers available to userspace).
2989 * netmap_reset() and the device specific routines
2990 * (e.g. ixgbe_setup_receive_rings()) map these
2991 * buffers at the end of the NIC ring, so here we
2992 * must set the RDT (tail) register to make sure
2993 * they are not overwritten.
2995 * In this driver the NIC ring starts at RDH = 0,
2996 * RDT points to the last slot available for reception (?),
2997 * so RDT = num_rx_desc - 1 means the whole ring is available.
3000 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
3001 (ifp->if_capenable & IFCAP_NETMAP)) {
3002 struct netmap_adapter *na = NA(adapter->ifp);
3003 struct netmap_kring *kring = na->rx_rings[i];
3004 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
3006 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
3008 #endif /* DEV_NETMAP */
3009 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
3010 adapter->num_rx_desc - 1);
3013 /* Enable Receive engine */
3014 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3015 if (hw->mac.type == ixgbe_mac_82598EB)
3016 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3017 rxctrl |= IXGBE_RXCTRL_RXEN;
3018 ixgbe_enable_rx_dma(hw, rxctrl);
3020 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3022 /* Set up MSI-X routing */
3023 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3024 ixgbe_configure_ivars(adapter);
3025 /* Set up auto-mask */
3026 if (hw->mac.type == ixgbe_mac_82598EB)
3027 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3029 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3030 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3032 } else { /* Simple settings for Legacy/MSI */
3033 ixgbe_set_ivar(adapter, 0, 0, 0);
3034 ixgbe_set_ivar(adapter, 0, 0, 1);
3035 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3038 ixgbe_init_fdir(adapter);
3041 * Check on any SFP devices that
3042 * need to be kick-started
3044 if (hw->phy.type == ixgbe_phy_none) {
3045 err = hw->phy.ops.identify(hw);
3046 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3048 "Unsupported SFP+ module type was detected.\n");
3053 /* Set moderation on the Link interrupt */
3054 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3056 /* Config/Enable Link */
3057 ixgbe_config_link(adapter);
3059 /* Hardware Packet Buffer & Flow Control setup */
3060 ixgbe_config_delay_values(adapter);
3062 /* Initialize the FC settings */
3065 /* Set up VLAN support and filter */
3066 ixgbe_setup_vlan_hw_support(adapter);
3068 /* Setup DMA Coalescing */
3069 ixgbe_config_dmac(adapter);
3071 /* And now turn on interrupts */
3072 ixgbe_enable_intr(adapter, false);
3074 /* Enable the use of the MBX by the VF's */
3075 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3076 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3077 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3078 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3081 /* Now inform the stack we're ready */
3082 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3085 } /* ixgbe_init_locked */
3087 /************************************************************************
3089 ************************************************************************/
/*
 * ixgbe_init - if_init entry point: acquires the core lock, runs
 * ixgbe_init_locked(), and releases the lock.
 * NOTE(review): gaps in the embedded line numbers (3091->3093, and the
 * missing return type / braces) show lines were elided from this copy;
 * restore from the pristine driver source before building.
 */
3091 ixgbe_init(void *arg)
3093 struct adapter *adapter = arg;
3095 IXGBE_CORE_LOCK(adapter);
3096 ixgbe_init_locked(adapter);
3097 IXGBE_CORE_UNLOCK(adapter);
3102 /************************************************************************
3105 * Setup the correct IVAR register for a particular MSI-X interrupt
3106 * (yes this is all very magic and confusing :)
3107 * - entry is the register array entry
3108 * - vector is the MSI-X vector for this queue
3109 * - type is RX/TX/MISC
3110 ************************************************************************/
/*
 * ixgbe_set_ivar - program an IVAR register entry, mapping an interrupt
 * cause (RX/TX queue, or MISC when type == -1) to an MSI-X vector.
 * The register layout differs between 82598 and the 82599/X540/X550
 * families, hence the switch on hw->mac.type.
 * NOTE(review): embedded line-number gaps (3114->3117, 3130->3133, ...)
 * indicate elided lines (local declarations, 'break's, closing braces);
 * recover them from the original source before building.
 */
3112 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3114 struct ixgbe_hw *hw = &adapter->hw;
/* All valid IVAR entries carry the ALLOC_VAL marker bit */
3117 vector |= IXGBE_IVAR_ALLOC_VAL;
3119 switch (hw->mac.type) {
3121 case ixgbe_mac_82598EB:
3123 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3125 entry += (type * 64);
/* 82598: four 8-bit entries per 32-bit IVAR register */
3126 index = (entry >> 2) & 0x1F;
3127 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3128 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3129 ivar |= (vector << (8 * (entry & 0x3)));
3130 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3133 case ixgbe_mac_82599EB:
3134 case ixgbe_mac_X540:
3135 case ixgbe_mac_X550:
3136 case ixgbe_mac_X550EM_x:
3137 case ixgbe_mac_X550EM_a:
3138 if (type == -1) { /* MISC IVAR */
3139 index = (entry & 1) * 8;
3140 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3141 ivar &= ~(0xFF << index);
3142 ivar |= (vector << index);
3143 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3144 } else { /* RX/TX IVARS */
3145 index = (16 * (entry & 1)) + (8 * type);
3146 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3147 ivar &= ~(0xFF << index);
3148 ivar |= (vector << index);
3149 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3155 } /* ixgbe_set_ivar */
3157 /************************************************************************
3158 * ixgbe_configure_ivars
3159 ************************************************************************/
/*
 * ixgbe_configure_ivars - map each queue's RX and TX causes to its MSI-X
 * vector via ixgbe_set_ivar(), seed an initial EITR moderation value, and
 * map the link (MISC) cause to adapter->vector.
 * NOTE(review): elided lines in this copy (e.g. the 'newitr' declaration
 * and part of the DMA-coalescing comment at 3170) — restore from the
 * original source before building.
 */
3161 ixgbe_configure_ivars(struct adapter *adapter)
3163 struct ix_queue *que = adapter->queues;
/* Derive EITR from the tunable max interrupt rate (4us units, masked) */
3166 if (ixgbe_max_interrupt_rate > 0)
3167 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3170 * Disable DMA coalescing if interrupt moderation is
3177 for (int i = 0; i < adapter->num_queues; i++, que++) {
3178 struct rx_ring *rxr = &adapter->rx_rings[i];
3179 struct tx_ring *txr = &adapter->tx_rings[i];
3180 /* First the RX queue entry */
3181 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3182 /* ... and the TX */
3183 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3184 /* Set an Initial EITR value */
3185 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3188 /* For the Link interrupt */
3189 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3190 } /* ixgbe_configure_ivars */
3192 /************************************************************************
3194 ************************************************************************/
/*
 * ixgbe_config_gpie - read-modify-write the GPIE register: enable
 * enhanced MSI-X mode when the feature is on, and enable the SDP pins
 * used for fan-failure, thermal-sensor, and link/optics detection
 * depending on MAC type.
 * NOTE(review): embedded numbering gaps (3205->3207->3211, 3222->3224,
 * 3226->3232) show elided OR-terms, 'break's, and braces in this copy;
 * restore before building.
 */
3196 ixgbe_config_gpie(struct adapter *adapter)
3198 struct ixgbe_hw *hw = &adapter->hw;
3201 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3203 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3204 /* Enable Enhanced MSI-X mode */
3205 gpie |= IXGBE_GPIE_MSIX_MODE
3207 | IXGBE_GPIE_PBA_SUPPORT
3211 /* Fan Failure Interrupt */
3212 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3213 gpie |= IXGBE_SDP1_GPIEN;
3215 /* Thermal Sensor Interrupt */
3216 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3217 gpie |= IXGBE_SDP0_GPIEN_X540;
3219 /* Link detection */
3220 switch (hw->mac.type) {
3221 case ixgbe_mac_82599EB:
3222 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3224 case ixgbe_mac_X550EM_x:
3225 case ixgbe_mac_X550EM_a:
3226 gpie |= IXGBE_SDP0_GPIEN_X540;
3232 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3235 } /* ixgbe_config_gpie */
3237 /************************************************************************
3238 * ixgbe_config_delay_values
3240 * Requires adapter->max_frame_size to be set.
3241 ************************************************************************/
/*
 * ixgbe_config_delay_values - compute flow-control high/low water marks
 * from adapter->max_frame_size (must already be set) and the RX packet
 * buffer size, using the X540-family or legacy delay-value macros, then
 * set pause time and XON behavior.
 * NOTE(review): 'default:' labels and 'break's between the case bodies
 * (3256->3259, 3272->3275) appear elided in this copy — restore before
 * building.
 */
3243 ixgbe_config_delay_values(struct adapter *adapter)
3245 struct ixgbe_hw *hw = &adapter->hw;
3246 u32 rxpb, frame, size, tmp;
3248 frame = adapter->max_frame_size;
3250 /* Calculate High Water */
3251 switch (hw->mac.type) {
3252 case ixgbe_mac_X540:
3253 case ixgbe_mac_X550:
3254 case ixgbe_mac_X550EM_x:
3255 case ixgbe_mac_X550EM_a:
3256 tmp = IXGBE_DV_X540(frame, frame);
3259 tmp = IXGBE_DV(frame, frame);
/* Convert byte-time delay value to KB and subtract from buffer size */
3262 size = IXGBE_BT2KB(tmp);
3263 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3264 hw->fc.high_water[0] = rxpb - size;
3266 /* Now calculate Low Water */
3267 switch (hw->mac.type) {
3268 case ixgbe_mac_X540:
3269 case ixgbe_mac_X550:
3270 case ixgbe_mac_X550EM_x:
3271 case ixgbe_mac_X550EM_a:
3272 tmp = IXGBE_LOW_DV_X540(frame);
3275 tmp = IXGBE_LOW_DV(frame);
3278 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3280 hw->fc.pause_time = IXGBE_FC_PAUSE;
3281 hw->fc.send_xon = TRUE;
3282 } /* ixgbe_config_delay_values */
3284 /************************************************************************
3285 * ixgbe_set_multi - Multicast Update
3287 * Called whenever multicast address list is updated.
3288 ************************************************************************/
/*
 * ixgbe_set_multi - rebuild the hardware multicast filter from the
 * ifnet's multicast address list, set FCTRL promiscuous/all-multi bits
 * per interface flags, and push the MC list to shared code via the
 * ixgbe_mc_array_itr iterator.
 * NOTE(review): this copy elides several lines (mta initialization
 * before the bzero at 3302, 'continue'/'break' in the FOREACH body,
 * #else/#endif pairs, 'else' before 3334, declarations of mcnt/fctrl/
 * update_ptr) — restore from the original source before building.
 */
3290 ixgbe_set_multi(struct adapter *adapter)
3292 struct ifmultiaddr *ifma;
3293 struct ixgbe_mc_addr *mta;
3294 struct ifnet *ifp = adapter->ifp;
3299 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
3302 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3304 #if __FreeBSD_version < 800000
3307 if_maddr_rlock(ifp);
3309 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
/* Only link-layer addresses participate; cap at the HW table size */
3310 if (ifma->ifma_addr->sa_family != AF_LINK)
3312 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
3314 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
3315 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3316 mta[mcnt].vmdq = adapter->pool;
3319 #if __FreeBSD_version < 800000
3320 IF_ADDR_UNLOCK(ifp);
3322 if_maddr_runlock(ifp);
3325 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3326 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3327 if (ifp->if_flags & IFF_PROMISC)
3328 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3329 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3330 ifp->if_flags & IFF_ALLMULTI) {
3331 fctrl |= IXGBE_FCTRL_MPE;
3332 fctrl &= ~IXGBE_FCTRL_UPE;
3334 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3336 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
/* Only program the exact MC table if it did not overflow */
3338 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3339 update_ptr = (u8 *)mta;
3340 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3341 ixgbe_mc_array_itr, TRUE);
3345 } /* ixgbe_set_multi */
3347 /************************************************************************
3348 * ixgbe_mc_array_itr
3350 * An iterator function needed by the multicast shared code.
3351 * It feeds the shared code routine the addresses in the
3352 * array of ixgbe_set_multi() one by one.
3353 ************************************************************************/
/*
 * ixgbe_mc_array_itr - iterator callback for ixgbe_update_mc_addr_list():
 * interprets *update_ptr as the current ixgbe_mc_addr entry and advances
 * the cursor to the next entry.
 * NOTE(review): this copy is missing lines (embedded numbering jumps
 * 3359->3362->3365) — presumably the *vmdq assignment and the address
 * return; restore from the original source before building.
 */
3355 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3357 struct ixgbe_mc_addr *mta;
3359 mta = (struct ixgbe_mc_addr *)*update_ptr;
3362 *update_ptr = (u8*)(mta + 1);
3365 } /* ixgbe_mc_array_itr */
3367 /************************************************************************
3368 * ixgbe_local_timer - Timer routine
3370 * Checks for link status, updates statistics,
3371 * and runs the watchdog check.
3372 ************************************************************************/
/*
 * ixgbe_local_timer - per-second callout (core lock held): probes
 * pluggable optics, refreshes link state and statistics, tracks
 * hung/active TX queues, rearms queues with pending work, and falls
 * through to a full watchdog reset only when every queue is hung.
 * NOTE(review): elided lines in this copy include local declarations
 * (hung/queues), the 'hung++' accounting, 'goto watchdog', the 'out:'
 * label, and several braces — restore from the original source before
 * building.
 */
3374 ixgbe_local_timer(void *arg)
3376 struct adapter *adapter = arg;
3377 device_t dev = adapter->dev;
3378 struct ix_queue *que = adapter->queues;
3382 mtx_assert(&adapter->core_mtx, MA_OWNED);
3384 /* Check for pluggable optics */
3385 if (adapter->sfp_probe)
3386 if (!ixgbe_sfp_probe(adapter))
3387 goto out; /* Nothing to do */
3389 ixgbe_update_link_status(adapter);
3390 ixgbe_update_stats_counters(adapter);
3393 * Check the TX queues status
3394 * - mark hung queues so we don't schedule on them
3395 * - watchdog only if all queues show hung
3397 for (int i = 0; i < adapter->num_queues; i++, que++) {
3398 /* Keep track of queues with work for soft irq */
3400 queues |= ((u64)1 << que->me);
3402 * Each time txeof runs without cleaning, but there
3403 * are uncleaned descriptors it increments busy. If
3404 * we get to the MAX we declare it hung.
3406 if (que->busy == IXGBE_QUEUE_HUNG) {
3408 /* Mark the queue as inactive */
3409 adapter->active_queues &= ~((u64)1 << que->me);
3412 /* Check if we've come back from hung */
3413 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
3414 adapter->active_queues |= ((u64)1 << que->me);
3416 if (que->busy >= IXGBE_MAX_TX_BUSY) {
3418 "Warning queue %d appears to be hung!\n", i);
3419 que->txr->busy = IXGBE_QUEUE_HUNG;
3424 /* Only truly watchdog if all queues show hung */
3425 if (hung == adapter->num_queues)
3427 else if (queues != 0) { /* Force an IRQ on queues with work */
3428 ixgbe_rearm_queues(adapter, queues);
/* Reschedule ourselves for the next tick */
3432 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Watchdog path: mark down, count the event, and reinitialize */
3436 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
3437 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3438 adapter->watchdog_events++;
3439 ixgbe_init_locked(adapter);
3440 } /* ixgbe_local_timer */
3442 /************************************************************************
3445 * Determine if a port had optics inserted.
3446 ************************************************************************/
/*
 * ixgbe_sfp_probe - for NL-PHY ports with no module present, try to
 * identify a newly inserted SFP+; on success resets the PHY, clears
 * adapter->sfp_probe, and reports whether supported optics are now
 * present (result presumably set TRUE in an elided line).
 * NOTE(review): numbering gaps (3456->3459, 3464->3467, 3468->3474)
 * show elided error checks, 'goto's and the return statement in this
 * copy — restore before building.
 */
3448 ixgbe_sfp_probe(struct adapter *adapter)
3450 struct ixgbe_hw *hw = &adapter->hw;
3451 device_t dev = adapter->dev;
3452 bool result = FALSE;
3454 if ((hw->phy.type == ixgbe_phy_nl) &&
3455 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3456 s32 ret = hw->phy.ops.identify_sfp(hw);
3459 ret = hw->phy.ops.reset(hw);
3460 adapter->sfp_probe = FALSE;
3461 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3462 device_printf(dev, "Unsupported SFP+ module detected!");
3464 "Reload driver with supported module.\n");
3467 device_printf(dev, "SFP+ module detected!\n");
3468 /* We now have supported optics */
3474 } /* ixgbe_sfp_probe */
3476 /************************************************************************
3477 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3478 ************************************************************************/
/*
 * ixgbe_handle_mod - tasklet for SFP module-insertion interrupts:
 * optionally applies the crosstalk-fix cage-presence check (ESDP read),
 * identifies and sets up the new SFP+, and on success requests the MSF
 * task; on failure the MSF request bit is cleared at handle_mod_out.
 * NOTE(review): this copy elides the ESDP bit masks (3490->3493,
 * 3495->3506), 'break's, the empty-cage early return, and device_printf
 * openers before 3509/3516 — restore before building.
 */
3480 ixgbe_handle_mod(void *context)
3482 struct adapter *adapter = context;
3483 struct ixgbe_hw *hw = &adapter->hw;
3484 device_t dev = adapter->dev;
3485 u32 err, cage_full = 0;
3487 if (adapter->hw.need_crosstalk_fix) {
3488 switch (hw->mac.type) {
3489 case ixgbe_mac_82599EB:
3490 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3493 case ixgbe_mac_X550EM_x:
3494 case ixgbe_mac_X550EM_a:
3495 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3506 err = hw->phy.ops.identify_sfp(hw);
3507 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3509 "Unsupported SFP+ module type was detected.\n");
3510 goto handle_mod_out;
3513 err = hw->mac.ops.setup_sfp(hw);
3514 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3516 "Setup failure - unsupported SFP+ module type.\n");
3517 goto handle_mod_out;
/* Success: ask the admin task to run the multispeed-fiber handler */
3519 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3520 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
3524 adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3525 } /* ixgbe_handle_mod */
3528 /************************************************************************
3529 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3530 ************************************************************************/
/*
 * ixgbe_handle_msf - tasklet for multispeed-fiber interrupts: refreshes
 * the supported PHY layer, determines advertised speeds (falling back to
 * link capabilities when none advertised), re-runs link setup, and
 * rebuilds the ifmedia list.
 * NOTE(review): the autoneg/negotiate local declarations (3536-3538
 * region) are elided in this copy — restore before building.
 */
3532 ixgbe_handle_msf(void *context)
3534 struct adapter *adapter = context;
3535 struct ixgbe_hw *hw = &adapter->hw;
3539 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3540 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3542 autoneg = hw->phy.autoneg_advertised;
3543 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3544 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3545 if (hw->mac.ops.setup_link)
3546 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3548 /* Adjust media types shown in ifconfig */
3549 ifmedia_removeall(&adapter->media);
3550 ixgbe_add_media_types(adapter);
3551 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3552 } /* ixgbe_handle_msf */
3554 /************************************************************************
3555 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3556 ************************************************************************/
/*
 * ixgbe_handle_phy - tasklet for external-PHY (LASI) interrupts: lets
 * the PHY ops handle the LASI event and logs over-temperature or other
 * errors.
 * NOTE(review): the 'error' declaration and the 'else if' opener before
 * 3568 are elided in this copy — restore before building.
 */
3558 ixgbe_handle_phy(void *context)
3560 struct adapter *adapter = context;
3561 struct ixgbe_hw *hw = &adapter->hw;
3564 error = hw->phy.ops.handle_lasi(hw);
3565 if (error == IXGBE_ERR_OVERTEMP)
3566 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3568 device_printf(adapter->dev,
3569 "Error handling LASI interrupt: %d\n", error);
3570 } /* ixgbe_handle_phy */
3572 /************************************************************************
3573 * ixgbe_handle_admin_task - Handler for interrupt tasklets meant to be
3574 * called in separate task.
3575 ************************************************************************/
/*
 * ixgbe_handle_admin_task - taskqueue handler that, under the core lock
 * and with non-queue interrupts disabled (keep_traffic=true), dispatches
 * each pending request bit in adapter->task_requests to its handler
 * (MOD/MSF/MBX/FDIR/PHY/LINK), clears the request word, then re-enables
 * interrupts (skip_traffic=true so queue irqs are untouched).
 */
3577 ixgbe_handle_admin_task(void *context, int pending)
3579 struct adapter *adapter = context;
3581 IXGBE_CORE_LOCK(adapter);
3582 ixgbe_disable_intr(adapter, true);
3584 if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3585 ixgbe_handle_mod(adapter);
3586 if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3587 ixgbe_handle_msf(adapter);
3588 if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3589 ixgbe_handle_mbx(adapter);
3590 if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3591 ixgbe_reinit_fdir(adapter);
3592 if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3593 ixgbe_handle_phy(adapter);
3594 if (adapter->task_requests & IXGBE_REQUEST_TASK_LINK)
3595 ixgbe_handle_link(adapter);
3596 adapter->task_requests = 0;
3598 ixgbe_enable_intr(adapter, true);
3599 IXGBE_CORE_UNLOCK(adapter);
3600 } /* ixgbe_handle_admin_task */
3602 /************************************************************************
3603 * ixgbe_stop - Stop the hardware
3605 * Disables all traffic on the adapter by issuing a
3606 * global reset on the MAC and deallocates TX/RX buffers.
3607 ************************************************************************/
/*
 * ixgbe_stop - quiesce the adapter (core lock held): disable interrupts,
 * stop the timer, clear IFF_DRV_RUNNING, issue the shared-code adapter
 * stop, shut the TX laser, report link down, and reprogram RAR[0].
 * NOTE(review): the 'ifp' declaration and the function's closing brace
 * are elided in this copy; hw->adapter_stopped is set FALSE before
 * ixgbe_stop_adapter() presumably so the shared code performs the stop —
 * restore missing lines before building.
 */
3609 ixgbe_stop(void *arg)
3612 struct adapter *adapter = arg;
3613 struct ixgbe_hw *hw = &adapter->hw;
3617 mtx_assert(&adapter->core_mtx, MA_OWNED);
3619 INIT_DEBUGOUT("ixgbe_stop: begin\n");
3620 ixgbe_disable_intr(adapter, false);
3621 callout_stop(&adapter->timer);
3623 /* Let the stack know...*/
3624 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3627 hw->adapter_stopped = FALSE;
3628 ixgbe_stop_adapter(hw);
3629 if (hw->mac.type == ixgbe_mac_82599EB)
3630 ixgbe_stop_mac_link_on_d3_82599(hw);
3631 /* Turn off the laser - noop with no optics */
3632 ixgbe_disable_tx_laser(hw);
3634 /* Update the stack */
3635 adapter->link_up = FALSE;
3636 ixgbe_update_link_status(adapter);
3638 /* reprogram the RAR[0] in case user changed it. */
3639 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3644 /************************************************************************
3645 * ixgbe_update_link_status - Update OS on link state
3647 * Note: Only updates the OS on the cached link state.
3648 * The real check of the hardware only happens with
3650 ************************************************************************/
/*
 * ixgbe_update_link_status - push the cached link state (adapter->link_up)
 * to the OS: on an up transition logs the speed, re-enables flow control,
 * reconfigures DMA coalescing, notifies the stack, and pings VFs under
 * SR-IOV; on a down transition logs, notifies, and pings VFs.
 * NOTE(review): this copy elides a bootverbose guard and the speed
 * suffix argument of the printf (3661->3663), plus closing braces —
 * restore before building.
 */
3652 ixgbe_update_link_status(struct adapter *adapter)
3654 struct ifnet *ifp = adapter->ifp;
3655 device_t dev = adapter->dev;
3657 if (adapter->link_up) {
3658 if (adapter->link_active == FALSE) {
3660 device_printf(dev, "Link is up %d Gbps %s \n",
3661 ((adapter->link_speed == 128) ? 10 : 1),
3663 adapter->link_active = TRUE;
3664 /* Update any Flow Control changes */
3665 ixgbe_fc_enable(&adapter->hw);
3666 /* Update DMA coalescing config */
3667 ixgbe_config_dmac(adapter);
3668 if_link_state_change(ifp, LINK_STATE_UP);
3669 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3670 ixgbe_ping_all_vfs(adapter);
3672 } else { /* Link down */
3673 if (adapter->link_active == TRUE) {
3675 device_printf(dev, "Link is Down\n");
3676 if_link_state_change(ifp, LINK_STATE_DOWN);
3677 adapter->link_active = FALSE;
3678 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3679 ixgbe_ping_all_vfs(adapter);
3684 } /* ixgbe_update_link_status */
3686 /************************************************************************
3687 * ixgbe_config_dmac - Configure DMA Coalescing
3688 ************************************************************************/
/*
 * ixgbe_config_dmac - configure DMA coalescing. Bails out early on MACs
 * older than X550 or when the op is absent; otherwise, when the cached
 * watchdog timer or link speed differs from the adapter state, updates
 * the dmac_config and calls the shared-code configure op.
 * NOTE(review): the early 'return' after the guard (3695->3698) is
 * elided in this copy — restore before building.
 */
3690 ixgbe_config_dmac(struct adapter *adapter)
3692 struct ixgbe_hw *hw = &adapter->hw;
3693 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3695 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3698 if (dcfg->watchdog_timer ^ adapter->dmac ||
3699 dcfg->link_speed ^ adapter->link_speed) {
3700 dcfg->watchdog_timer = adapter->dmac;
3701 dcfg->fcoe_en = false;
3702 dcfg->link_speed = adapter->link_speed;
3705 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3706 dcfg->watchdog_timer, dcfg->link_speed);
3708 hw->mac.ops.dmac_config(hw);
3710 } /* ixgbe_config_dmac */
3712 /************************************************************************
3714 * If skip_traffic parameter is set, queues' irqs are not enabled.
3715 * This is useful while reenabling interrupts after disabling them
3716 * with ixgbe_disable_intr() 'keep_traffic' parameter set to true
3717 * as queues' interrupts are already enabled.
3718 ************************************************************************/
/*
 * ixgbe_enable_intr - build and write the EIMS enable mask (per-MAC
 * cause bits for ECC, thermal sensor, SDP pins, fan failure, mailbox,
 * flow director), set EIAC auto-clear under MSI-X, and — unless
 * skip_traffic is set (used when queue irqs were never disabled) —
 * enable every queue's interrupt before flushing.
 * NOTE(review): this copy elides 'break's between the MAC cases, the
 * 'fwsm'/'mask' declarations, and braces — restore before building.
 */
3720 ixgbe_enable_intr(struct adapter *adapter, bool skip_traffic)
3722 struct ixgbe_hw *hw = &adapter->hw;
3723 struct ix_queue *que = adapter->queues;
3726 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3728 switch (adapter->hw.mac.type) {
3729 case ixgbe_mac_82599EB:
3730 mask |= IXGBE_EIMS_ECC;
3731 /* Temperature sensor on some adapters */
3732 mask |= IXGBE_EIMS_GPI_SDP0;
3733 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3734 mask |= IXGBE_EIMS_GPI_SDP1;
3735 mask |= IXGBE_EIMS_GPI_SDP2;
3737 case ixgbe_mac_X540:
3738 /* Detect if Thermal Sensor is enabled */
3739 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3740 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3741 mask |= IXGBE_EIMS_TS;
3742 mask |= IXGBE_EIMS_ECC;
3744 case ixgbe_mac_X550:
3745 /* MAC thermal sensor is automatically enabled */
3746 mask |= IXGBE_EIMS_TS;
3747 mask |= IXGBE_EIMS_ECC;
3749 case ixgbe_mac_X550EM_x:
3750 case ixgbe_mac_X550EM_a:
3751 /* Some devices use SDP0 for important information */
3752 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3753 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3754 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3755 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3756 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3757 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3758 mask |= IXGBE_EICR_GPI_SDP0_X540;
3759 mask |= IXGBE_EIMS_ECC;
3765 /* Enable Fan Failure detection */
3766 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3767 mask |= IXGBE_EIMS_GPI_SDP1;
/* Enable SR-IOV mailbox and Flow Director causes when featured */
3769 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3770 mask |= IXGBE_EIMS_MAILBOX;
3771 /* Enable Flow Director */
3772 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3773 mask |= IXGBE_EIMS_FLOW_DIR;
3775 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3777 /* With MSI-X we use auto clear */
3778 if (adapter->msix_mem) {
3779 mask = IXGBE_EIMS_ENABLE_MASK;
3780 /* Don't autoclear Link */
3781 mask &= ~IXGBE_EIMS_OTHER;
3782 mask &= ~IXGBE_EIMS_LSC;
3783 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3784 mask &= ~IXGBE_EIMS_MAILBOX;
3785 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3788 if (!skip_traffic) {
3790 * Now enable all queues, this is done separately to
3791 * allow for handling the extended (beyond 32) MSI-X
3792 * vectors that can be used by 82599
3794 for (int i = 0; i < adapter->num_queues; i++, que++)
3795 ixgbe_enable_queue(adapter, que->msix);
3798 IXGBE_WRITE_FLUSH(hw);
3801 } /* ixgbe_enable_intr */
3803 /************************************************************************
3804 * ixgbe_disable_intr
3805 * If keep_traffic parameter is set, queue interrupts are not disabled.
3806 * This is needed by ixgbe_handle_admin_task() to handle link specific
3807 * interrupt procedures without stopping the traffic.
3808 ************************************************************************/
/*
 * ixgbe_disable_intr - mask interrupts via EIMC/EIMC_EX and restrict
 * EIAC auto-clear. With keep_traffic set, only non-queue causes are
 * masked so RX/TX queue interrupts keep flowing (used by the admin
 * task); otherwise everything is masked.
 * NOTE(review): the keep_traffic/else branch structure and the
 * eimc_ext_mask assignment are partially elided in this copy
 * (3817->3819, 3821->3825) — restore before building.
 */
3810 ixgbe_disable_intr(struct adapter *adapter, bool keep_traffic)
3812 struct ixgbe_hw *hw = &adapter->hw;
3813 u32 eiac_mask, eimc_mask, eimc_ext_mask;
3816 /* Autoclear only queue irqs */
3817 eiac_mask = IXGBE_EICR_RTX_QUEUE;
3819 /* Disable everything but queue irqs */
3821 eimc_mask &= ~IXGBE_EIMC_RTX_QUEUE;
/* 82598 has a single 32-bit mask; newer MACs split across EIMC_EX */
3825 eimc_mask = (hw->mac.type == ixgbe_mac_82598EB) ? ~0 : 0xFFFF0000;
3829 if (adapter->msix_mem)
3830 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac_mask);
3832 IXGBE_WRITE_REG(hw, IXGBE_EIMC, eimc_mask);
3833 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), eimc_ext_mask);
3834 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), eimc_ext_mask);
3836 IXGBE_WRITE_FLUSH(hw);
3839 } /* ixgbe_disable_intr */
3841 /************************************************************************
3842 * ixgbe_legacy_irq - Legacy Interrupt Service routine
3843 ************************************************************************/
/*
 * ixgbe_legacy_irq - legacy (INTx/MSI) interrupt service routine: masks
 * interrupts (82598 errata #26), reads EICR, services RX/TX on queue 0,
 * checks fan failure, and forwards link / SFP-module / multispeed-fiber
 * / external-PHY causes to the admin task via task_requests bits before
 * re-enabling interrupts.
 * NOTE(review): this copy elides the 'more' declaration, the spurious-
 * interrupt (eicr == 0) early-out around 3862, the TX lock acquisition
 * before 3871, the legacy-vs-multiqueue start call, the 'else' between
 * the SFP mask branches, and the 'if (more)' guard before 3918 —
 * restore from the original source before building.
 */
3845 ixgbe_legacy_irq(void *arg)
3847 struct ix_queue *que = arg;
3848 struct adapter *adapter = que->adapter;
3849 struct ixgbe_hw *hw = &adapter->hw;
3850 struct ifnet *ifp = adapter->ifp;
3851 struct tx_ring *txr = adapter->tx_rings;
3853 u32 eicr, eicr_mask;
3855 /* Silicon errata #26 on 82598 */
3856 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3858 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3862 ixgbe_enable_intr(adapter, false);
3866 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3867 more = ixgbe_rxeof(que);
3871 if (!ixgbe_ring_empty(ifp, txr->br))
3872 ixgbe_start_locked(ifp, txr);
3873 IXGBE_TX_UNLOCK(txr);
3876 /* Check for fan failure */
3877 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3878 ixgbe_check_fan_failure(adapter, eicr, true);
3879 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3882 /* Link status change */
3883 if (eicr & IXGBE_EICR_LSC){
3884 adapter->task_requests |= IXGBE_REQUEST_TASK_LINK;
3885 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
3888 if (ixgbe_is_sfp(hw)) {
3889 /* Pluggable optics-related interrupt */
3890 if (hw->mac.type >= ixgbe_mac_X540)
3891 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3893 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3895 if (eicr & eicr_mask) {
3896 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3897 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3898 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
3901 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3902 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3903 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3904 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3905 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3906 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
3910 /* External PHY interrupt */
3911 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3912 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3913 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3914 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
3918 taskqueue_enqueue(que->tq, &que->que_task);
3920 ixgbe_enable_intr(adapter, false);
3923 } /* ixgbe_legacy_irq */
3925 /************************************************************************
3926 * ixgbe_free_pci_resources
3927 ************************************************************************/
/*
 * ixgbe_free_pci_resources - release per-queue MSI-X IRQs (teardown +
 * bus resource), the link/legacy interrupt, MSI allocation, the MSI-X
 * table BAR, and the main register BAR. Safe against partially-failed
 * attach: bails out when adapter->res was never set.
 * NOTE(review): 'rid'/'memrid' declarations, 'else' before 3938, a
 * 'goto mem'-style jump after the res==NULL test, and que->tag = NULL
 * appear elided in this copy — restore before building.
 */
3929 ixgbe_free_pci_resources(struct adapter *adapter)
3931 struct ix_queue *que = adapter->queues;
3932 device_t dev = adapter->dev;
3935 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3936 memrid = PCIR_BAR(MSIX_82598_BAR);
3938 memrid = PCIR_BAR(MSIX_82599_BAR);
3941 * There is a slight possibility of a failure mode
3942 * in attach that will result in entering this function
3943 * before interrupt resources have been initialized, and
3944 * in that case we do not want to execute the loops below
3945 * We can detect this reliably by the state of the adapter
3948 if (adapter->res == NULL)
3952 * Release all msix queue resources:
3954 for (int i = 0; i < adapter->num_queues; i++, que++) {
3955 rid = que->msix + 1;
3956 if (que->tag != NULL) {
3957 bus_teardown_intr(dev, que->res, que->tag);
3960 if (que->res != NULL)
3961 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
3965 if (adapter->tag != NULL) {
3966 bus_teardown_intr(dev, adapter->res, adapter->tag);
3967 adapter->tag = NULL;
3970 /* Clean the Legacy or Link interrupt last */
3971 if (adapter->res != NULL)
3972 bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
3976 if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
3977 (adapter->feat_en & IXGBE_FEATURE_MSIX))
3978 pci_release_msi(dev);
3980 if (adapter->msix_mem != NULL)
3981 bus_release_resource(dev, SYS_RES_MEMORY, memrid,
3984 if (adapter->pci_mem != NULL)
3985 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
3989 } /* ixgbe_free_pci_resources */
3991 /************************************************************************
3992 * ixgbe_set_sysctl_value
3993 ************************************************************************/
/*
 * ixgbe_set_sysctl_value - register a read-write integer sysctl under
 * the device's sysctl tree, initializing *limit to the given value.
 */
3995 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
3996 const char *description, int *limit, int value)
3999 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4000 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4001 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4002 } /* ixgbe_set_sysctl_value */
4004 /************************************************************************
4005 * ixgbe_sysctl_flowcntl
4007 * SYSCTL wrapper around setting Flow Control
4008 ************************************************************************/
/*
 * ixgbe_sysctl_flowcntl - sysctl handler wrapping flow-control setting:
 * reads the user value, returns early on error / no new value / no
 * change, otherwise applies it via ixgbe_set_flowcntl().
 * NOTE(review): the 'error'/'fc' declarations and the two early
 * 'return' statements (after 4019 and 4023) are elided in this copy —
 * restore before building.
 */
4010 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4012 struct adapter *adapter;
4015 adapter = (struct adapter *)arg1;
4016 fc = adapter->hw.fc.current_mode;
4018 error = sysctl_handle_int(oidp, &fc, 0, req);
4019 if ((error) || (req->newptr == NULL))
4022 /* Don't bother if it's not changed */
4023 if (fc == adapter->hw.fc.current_mode)
4026 return ixgbe_set_flowcntl(adapter, fc);
4027 } /* ixgbe_sysctl_flowcntl */
4029 /************************************************************************
4030 * ixgbe_set_flowcntl - Set flow control
4032 * Flow control values:
4037 ************************************************************************/
/*
 * ixgbe_set_flowcntl - apply a requested flow-control mode: pause modes
 * disable per-queue RX drop (multiqueue only), other values enable it
 * and force ixgbe_fc_none; autoneg of FC is disabled because a value is
 * being forced, then the hardware is programmed via ixgbe_fc_enable().
 * NOTE(review): the 'switch (fc)' opener, a 'case ixgbe_fc_full:',
 * 'break's, the default/invalid-value return, and the final return are
 * elided in this copy — restore before building.
 */
4039 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4042 case ixgbe_fc_rx_pause:
4043 case ixgbe_fc_tx_pause:
4045 adapter->hw.fc.requested_mode = fc;
4046 if (adapter->num_queues > 1)
4047 ixgbe_disable_rx_drop(adapter);
4050 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4051 if (adapter->num_queues > 1)
4052 ixgbe_enable_rx_drop(adapter);
4058 /* Don't autoneg if forcing a value */
4059 adapter->hw.fc.disable_fc_autoneg = TRUE;
4060 ixgbe_fc_enable(&adapter->hw);
4063 } /* ixgbe_set_flowcntl */
4065 /************************************************************************
4066 * ixgbe_enable_rx_drop
4068 * Enable the hardware to drop packets when the buffer is
4069 * full. This is useful with multiqueue, so that no single
4070 * queue being full stalls the entire RX engine. We only
4071 * enable this when Multiqueue is enabled AND Flow Control
4073 ************************************************************************/
/*
 * ixgbe_enable_rx_drop - set SRRCTL_DROP_EN on every RX ring so a full
 * queue drops instead of stalling the RX engine, and set the QDE drop
 * bit for each VF.
 * NOTE(review): the 'srrctl' declaration and the QDE enable flag in the
 * write (4091->4094) are elided in this copy — restore before building.
 */
4075 ixgbe_enable_rx_drop(struct adapter *adapter)
4077 struct ixgbe_hw *hw = &adapter->hw;
4078 struct rx_ring *rxr;
4081 for (int i = 0; i < adapter->num_queues; i++) {
4082 rxr = &adapter->rx_rings[i];
4083 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4084 srrctl |= IXGBE_SRRCTL_DROP_EN;
4085 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4088 /* enable drop for each vf */
4089 for (int i = 0; i < adapter->num_vfs; i++) {
4090 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4091 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4094 } /* ixgbe_enable_rx_drop */
4096 /************************************************************************
4097 * ixgbe_disable_rx_drop
4098 ************************************************************************/
/* Inverse of ixgbe_enable_rx_drop(): clear SRRCTL.DROP_EN on each PF ring
 * and write QDE per VF without the drop-enable bit set. */
4100 ixgbe_disable_rx_drop(struct adapter *adapter)
4102 struct ixgbe_hw *hw = &adapter->hw;
4103 struct rx_ring *rxr;
4106 for (int i = 0; i < adapter->num_queues; i++) {
4107 rxr = &adapter->rx_rings[i];
4108 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4109 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4110 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4113 /* disable drop for each vf */
4114 for (int i = 0; i < adapter->num_vfs; i++) {
4115 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4116 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4118 } /* ixgbe_disable_rx_drop */
4120 /************************************************************************
4121 * ixgbe_sysctl_advertise
4123 * SYSCTL wrapper around setting advertised speed
4124 ************************************************************************/
4126 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4128 struct adapter *adapter;
4129 int error, advertise;
4131 adapter = (struct adapter *)arg1;
4132 advertise = adapter->advertise;
/* Export the cached value; return early on error or on a pure read
 * (newptr == NULL means no new value was supplied). */
4134 error = sysctl_handle_int(oidp, &advertise, 0, req);
4135 if ((error) || (req->newptr == NULL))
/* All validation and hardware programming live in ixgbe_set_advertise(). */
4138 return ixgbe_set_advertise(adapter, advertise);
4139 } /* ixgbe_sysctl_advertise */
4141 /************************************************************************
4142 * ixgbe_set_advertise - Control advertised link speed
4145 * 0x1 - advertise 100 Mb
4146 * 0x2 - advertise 1G
4147 * 0x4 - advertise 10G
4148 * 0x8 - advertise 10 Mb (yes, Mb)
4149 ************************************************************************/
4151 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4154 struct ixgbe_hw *hw;
4155 ixgbe_link_speed speed = 0;
4156 ixgbe_link_speed link_caps = 0;
4157 s32 err = IXGBE_NOT_IMPLEMENTED;
4158 bool negotiate = FALSE;
4160 /* Checks to validate new value */
4161 if (adapter->advertise == advertise) /* no change */
4167 /* No speed changes for backplane media */
4168 if (hw->phy.media_type == ixgbe_media_type_backplane)
/* Only copper PHYs and multispeed fiber modules can renegotiate speed. */
4171 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4172 (hw->phy.multispeed_fiber))) {
4173 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
/* The value is a bitmask; anything outside 0x1..0xF is rejected. */
4177 if (advertise < 0x1 || advertise > 0xF) {
4178 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4182 if (hw->mac.ops.get_link_capabilities) {
4183 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4185 if (err != IXGBE_SUCCESS) {
4186 device_printf(dev, "Unable to determine supported advertise speeds\n");
4191 /* Set new value and report new advertised mode */
/* For each requested bit, verify the PHY reports that speed in link_caps
 * before folding it into the ixgbe_link_speed mask. */
4192 if (advertise & 0x1) {
4193 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4194 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4197 speed |= IXGBE_LINK_SPEED_100_FULL;
4199 if (advertise & 0x2) {
4200 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4201 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4204 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4206 if (advertise & 0x4) {
4207 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4208 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4211 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4213 if (advertise & 0x8) {
4214 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4215 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4218 speed |= IXGBE_LINK_SPEED_10_FULL;
/* Program the MAC and cache the accepted value for later reads. */
4221 hw->mac.autotry_restart = TRUE;
4222 hw->mac.ops.setup_link(hw, speed, TRUE);
4223 adapter->advertise = advertise;
4226 } /* ixgbe_set_advertise */
4228 /************************************************************************
4229 * ixgbe_get_advertise - Get current advertised speed settings
4231 * Formatted for sysctl usage.
4233 * 0x1 - advertise 100 Mb
4234 * 0x2 - advertise 1G
4235 * 0x4 - advertise 10G
4236 * 0x8 - advertise 10 Mb (yes, Mb)
4237 ************************************************************************/
4239 ixgbe_get_advertise(struct adapter *adapter)
4241 struct ixgbe_hw *hw = &adapter->hw;
4243 ixgbe_link_speed link_caps = 0;
4245 bool negotiate = FALSE;
4248 * Advertised speed means nothing unless it's copper or
4251 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4252 !(hw->phy.multispeed_fiber))
4255 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4256 if (err != IXGBE_SUCCESS)
/* Translate the hardware link_caps bits into the sysctl bitmask
 * documented in the header above (same encoding ixgbe_set_advertise uses). */
4260 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4261 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
4262 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
4263 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
4266 } /* ixgbe_get_advertise */
4268 /************************************************************************
4269 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4272 * 0/1 - off / on (use default value of 1000)
4274 * Legal timer values are:
4275 * 50,100,250,500,1000,2000,5000,10000
4277 * Turning off interrupt moderation will also turn this off.
4278 ************************************************************************/
4280 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4282 struct adapter *adapter = (struct adapter *)arg1;
4283 struct ifnet *ifp = adapter->ifp;
4287 newval = adapter->dmac;
4288 error = sysctl_handle_int(oidp, &newval, 0, req);
4289 if ((error) || (req->newptr == NULL))
/* NOTE(review): the switch over newval (0 / 1 / timer values) is not fully
 * visible in this extract; only representative arms remain below. */
4298 /* Enable and use default */
4299 adapter->dmac = 1000;
4309 /* Legal values - allow */
4310 adapter->dmac = newval;
4313 /* Do nothing, illegal value */
4317 /* Re-initialize hardware if it's already running */
4318 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4319 ixgbe_init(adapter);
4322 } /* ixgbe_sysctl_dmac */
4325 /************************************************************************
4326 * ixgbe_sysctl_power_state
4328 * Sysctl to test power states
4330 * 0 - set device to D0
4331 * 3 - set device to D3
4332 * (none) - get current device power state
4333 ************************************************************************/
4335 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4337 struct adapter *adapter = (struct adapter *)arg1;
4338 device_t dev = adapter->dev;
4339 int curr_ps, new_ps, error = 0;
4341 curr_ps = new_ps = pci_get_powerstate(dev);
4343 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4344 if ((error) || (req->newptr == NULL))
4347 if (new_ps == curr_ps)
/* Only the D0 <-> D3 transitions are honored, via bus suspend/resume. */
4350 if (new_ps == 3 && curr_ps == 0)
4351 error = DEVICE_SUSPEND(dev);
4352 else if (new_ps == 0 && curr_ps == 3)
4353 error = DEVICE_RESUME(dev);
/* Report the state the PCI layer actually reached, not the request. */
4357 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4360 } /* ixgbe_sysctl_power_state */
4363 /************************************************************************
4364 * ixgbe_sysctl_wol_enable
4366 * Sysctl to enable/disable the WoL capability,
4367 * if supported by the adapter.
4372 ************************************************************************/
4374 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4376 struct adapter *adapter = (struct adapter *)arg1;
4377 struct ixgbe_hw *hw = &adapter->hw;
4378 int new_wol_enabled;
4381 new_wol_enabled = hw->wol_enabled;
4382 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4383 if ((error) || (req->newptr == NULL))
/* Normalize any nonzero user input to 1 before comparing/storing. */
4385 new_wol_enabled = !!(new_wol_enabled);
4386 if (new_wol_enabled == hw->wol_enabled)
/* Refuse to enable WoL on adapters without the capability. */
4389 if (new_wol_enabled > 0 && !adapter->wol_support)
4392 hw->wol_enabled = new_wol_enabled;
4395 } /* ixgbe_sysctl_wol_enable */
4397 /************************************************************************
4398 * ixgbe_sysctl_wufc - Wake Up Filter Control
4400 * Sysctl to enable/disable the types of packets that the
4401 * adapter will wake up on upon receipt.
4403 * 0x1 - Link Status Change
4404 * 0x2 - Magic Packet
4405 * 0x4 - Direct Exact
4406 * 0x8 - Directed Multicast
4408 * 0x20 - ARP/IPv4 Request Packet
4409 * 0x40 - Direct IPv4 Packet
4410 * 0x80 - Direct IPv6 Packet
4412 * Settings not listed above will cause the sysctl to return an error.
4413 ************************************************************************/
4415 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4417 struct adapter *adapter = (struct adapter *)arg1;
4421 new_wufc = adapter->wufc;
4423 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4424 if ((error) || (req->newptr == NULL))
4426 if (new_wufc == adapter->wufc)
/* Only the low byte carries user-settable filter bits; reject the rest. */
4429 if (new_wufc & 0xffffff00)
/* Merge the new low-byte bits with the preserved bits of the cached value. */
4433 new_wufc |= (0xffffff & adapter->wufc);
4434 adapter->wufc = new_wufc;
4437 } /* ixgbe_sysctl_wufc */
4440 /************************************************************************
4441 * ixgbe_sysctl_print_rss_config
4442 ************************************************************************/
/* Debug-only sysctl (compiled under IXGBE_DEBUG, per the #endif below):
 * dumps the RSS redirection table into an sbuf for userland. */
4444 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4446 struct adapter *adapter = (struct adapter *)arg1;
4447 struct ixgbe_hw *hw = &adapter->hw;
4448 device_t dev = adapter->dev;
4450 int error = 0, reta_size;
4453 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4455 device_printf(dev, "Could not allocate sbuf for output.\n");
4459 // TODO: use sbufs to make a string to print out
4460 /* Set multiplier for RETA setup and table size based on MAC */
4461 switch (adapter->hw.mac.type) {
4462 case ixgbe_mac_X550:
4463 case ixgbe_mac_X550EM_x:
4464 case ixgbe_mac_X550EM_a:
4472 /* Print out the redirection table */
4473 sbuf_cat(buf, "\n");
4474 for (int i = 0; i < reta_size; i++) {
/* First 32 entries live in RETA; the X550-family extension entries are
 * read from ERETA with a rebased index. */
4476 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4477 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4479 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4480 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4484 // TODO: print more config
/* sbuf_finish flushes the buffer to the sysctl request and reports errors. */
4486 error = sbuf_finish(buf);
4488 device_printf(dev, "Error finishing sbuf: %d\n", error);
4493 } /* ixgbe_sysctl_print_rss_config */
4494 #endif /* IXGBE_DEBUG */
4496 /************************************************************************
4497 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4499 * For X552/X557-AT devices using an external PHY
4500 ************************************************************************/
4502 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4504 struct adapter *adapter = (struct adapter *)arg1;
4505 struct ixgbe_hw *hw = &adapter->hw;
4508 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4509 device_printf(adapter->dev,
4510 "Device has no supported external thermal sensor.\n");
4514 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4515 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4516 device_printf(adapter->dev,
4517 "Error reading from PHY's current temperature register\n");
4521 /* Shift temp for output */
4524 return (sysctl_handle_int(oidp, NULL, reg, req));
4525 } /* ixgbe_sysctl_phy_temp */
4527 /************************************************************************
4528 * ixgbe_sysctl_phy_overtemp_occurred
4530 * Reports (directly from the PHY) whether the current PHY
4531 * temperature is over the overtemp threshold.
4532 ************************************************************************/
4534 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4536 struct adapter *adapter = (struct adapter *)arg1;
4537 struct ixgbe_hw *hw = &adapter->hw;
4540 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4541 device_printf(adapter->dev,
4542 "Device has no supported external thermal sensor.\n");
4546 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4547 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4548 device_printf(adapter->dev,
4549 "Error reading from PHY's temperature status register\n");
4553 /* Get occurrence bit */
4554 reg = !!(reg & 0x4000);
4556 return (sysctl_handle_int(oidp, 0, reg, req));
4557 } /* ixgbe_sysctl_phy_overtemp_occurred */
4559 /************************************************************************
4560 * ixgbe_sysctl_eee_state
4562 * Sysctl to set EEE power saving feature
4566 * (none) - get current device EEE state
4567 ************************************************************************/
4569 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4571 struct adapter *adapter = (struct adapter *)arg1;
4572 device_t dev = adapter->dev;
4573 int curr_eee, new_eee, error = 0;
/* Current state is derived from the feat_en bit, normalized to 0/1. */
4576 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4578 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4579 if ((error) || (req->newptr == NULL))
4583 if (new_eee == curr_eee)
/* EEE must be present in feat_cap before it may be toggled. */
4587 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4590 /* Bounds checking */
4591 if ((new_eee < 0) || (new_eee > 1))
4594 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4596 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4600 /* Restart auto-neg */
4601 ixgbe_init(adapter);
4603 device_printf(dev, "New EEE state: %d\n", new_eee);
4605 /* Cache new value */
/* Mirror the state into feat_en so curr_eee is correct on the next read. */
4607 adapter->feat_en |= IXGBE_FEATURE_EEE;
4609 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4612 } /* ixgbe_sysctl_eee_state */
4614 /************************************************************************
4615 * ixgbe_init_device_features
4616 ************************************************************************/
/* Populates adapter->feat_cap (what the MAC supports) and adapter->feat_en
 * (what this driver instance will use), per MAC type and global tunables. */
4618 ixgbe_init_device_features(struct adapter *adapter)
/* Baseline capabilities common to all supported MACs. */
4620 adapter->feat_cap = IXGBE_FEATURE_NETMAP
4623 | IXGBE_FEATURE_MSIX
4624 | IXGBE_FEATURE_LEGACY_IRQ
4625 | IXGBE_FEATURE_LEGACY_TX;
4627 /* Set capabilities first... */
4628 switch (adapter->hw.mac.type) {
4629 case ixgbe_mac_82598EB:
4630 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4631 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4633 case ixgbe_mac_X540:
4634 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4635 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4636 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4637 (adapter->hw.bus.func == 0))
4638 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4640 case ixgbe_mac_X550:
4641 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4642 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4643 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4645 case ixgbe_mac_X550EM_x:
4646 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4647 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4648 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4649 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4651 case ixgbe_mac_X550EM_a:
4652 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4653 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
/* X550EM_a parts cannot use INTx at all. */
4654 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4655 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4656 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4657 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4658 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4661 case ixgbe_mac_82599EB:
4662 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4663 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4664 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4665 (adapter->hw.bus.func == 0))
4666 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4667 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4668 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4674 /* Enabled by default... */
4675 /* Fan failure detection */
4676 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4677 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4679 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4680 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4682 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4683 adapter->feat_en |= IXGBE_FEATURE_EEE;
4684 /* Thermal Sensor */
4685 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4686 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4688 /* Enabled via global sysctl... */
4690 if (ixgbe_enable_fdir) {
4691 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4692 adapter->feat_en |= IXGBE_FEATURE_FDIR;
4694 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
4696 /* Legacy (single queue) transmit */
4697 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
4698 ixgbe_enable_legacy_tx)
4699 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
4701 * Message Signal Interrupts - Extended (MSI-X)
4702 * Normal MSI is only enabled if MSI-X calls fail.
4704 if (!ixgbe_enable_msix)
4705 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4706 /* Receive-Side Scaling (RSS) */
4707 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4708 adapter->feat_en |= IXGBE_FEATURE_RSS;
4710 /* Disable features with unmet dependencies... */
/* RSS and SR-IOV both require MSI-X; drop them if MSI-X was masked off. */
4712 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4713 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4714 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4715 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4716 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4718 } /* ixgbe_init_device_features */
4720 /************************************************************************
4721 * ixgbe_probe - Device identification routine
4723 * Determines if the driver should be loaded on
4724 * adapter based on its PCI vendor/device ID.
4726 * return BUS_PROBE_DEFAULT on success, positive on failure
4727 ************************************************************************/
4729 ixgbe_probe(device_t dev)
4731 ixgbe_vendor_info_t *ent;
4733 u16 pci_vendor_id = 0;
4734 u16 pci_device_id = 0;
4735 u16 pci_subvendor_id = 0;
4736 u16 pci_subdevice_id = 0;
4737 char adapter_name[256];
4739 INIT_DEBUGOUT("ixgbe_probe: begin");
/* Fast reject for non-Intel devices before reading the remaining IDs. */
4741 pci_vendor_id = pci_get_vendor(dev);
4742 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
4745 pci_device_id = pci_get_device(dev);
4746 pci_subvendor_id = pci_get_subvendor(dev);
4747 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the zero-terminated vendor table; a subvendor/subdevice of 0 in a
 * table entry acts as a wildcard. */
4749 ent = ixgbe_vendor_info_array;
4750 while (ent->vendor_id != 0) {
4751 if ((pci_vendor_id == ent->vendor_id) &&
4752 (pci_device_id == ent->device_id) &&
4753 ((pci_subvendor_id == ent->subvendor_id) ||
4754 (ent->subvendor_id == 0)) &&
4755 ((pci_subdevice_id == ent->subdevice_id) ||
4756 (ent->subdevice_id == 0))) {
/* NOTE(review): sprintf into a fixed 256-byte buffer — inputs are
 * driver-internal strings today, but snprintf would be safer. */
4757 sprintf(adapter_name, "%s, Version - %s",
4758 ixgbe_strings[ent->index],
4759 ixgbe_driver_version);
4760 device_set_desc_copy(dev, adapter_name);
4761 ++ixgbe_total_ports;
4762 return (BUS_PROBE_DEFAULT);
4771 /************************************************************************
4772 * ixgbe_ioctl - Ioctl entry point
4774 * Called when the user wants to configure the interface.
4776 * return 0 on success, positive on failure
4777 ************************************************************************/
4779 ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4781 struct adapter *adapter = ifp->if_softc;
4782 struct ifreq *ifr = (struct ifreq *) data;
4783 #if defined(INET) || defined(INET6)
4784 struct ifaddr *ifa = (struct ifaddr *)data;
4787 bool avoid_reset = FALSE;
/* SIOCSIFADDR handling (switch header not visible in this extract):
 * for INET/INET6 addresses, avoid a full reset where possible. */
4792 if (ifa->ifa_addr->sa_family == AF_INET)
4796 if (ifa->ifa_addr->sa_family == AF_INET6)
4800 * Calling init results in link renegotiation,
4801 * so we avoid doing it when possible.
4804 ifp->if_flags |= IFF_UP;
4805 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
4806 ixgbe_init(adapter);
4808 if (!(ifp->if_flags & IFF_NOARP))
4809 arp_ifinit(ifp, ifa);
4812 error = ether_ioctl(ifp, command, data);
4815 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
4816 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
/* MTU change: update max_frame_size under the core lock and re-init if
 * the interface is already running. */
4819 IXGBE_CORE_LOCK(adapter);
4820 ifp->if_mtu = ifr->ifr_mtu;
4821 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
4822 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4823 ixgbe_init_locked(adapter);
4824 ixgbe_recalculate_max_frame(adapter);
4825 IXGBE_CORE_UNLOCK(adapter);
4829 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
4830 IXGBE_CORE_LOCK(adapter);
4831 if (ifp->if_flags & IFF_UP) {
4832 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only reprogram promisc/allmulti if those bits actually changed. */
4833 if ((ifp->if_flags ^ adapter->if_flags) &
4834 (IFF_PROMISC | IFF_ALLMULTI)) {
4835 ixgbe_set_promisc(adapter);
4838 ixgbe_init_locked(adapter);
4840 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4841 ixgbe_stop(adapter);
4842 adapter->if_flags = ifp->if_flags;
4843 IXGBE_CORE_UNLOCK(adapter);
4847 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
4848 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Quiesce interrupts while the multicast filter table is rewritten. */
4849 IXGBE_CORE_LOCK(adapter);
4850 ixgbe_disable_intr(adapter, false);
4851 ixgbe_set_multi(adapter);
4852 ixgbe_enable_intr(adapter, false);
4853 IXGBE_CORE_UNLOCK(adapter);
4858 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
4859 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
4863 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
/* Toggle only the capability bits that differ from the current setting. */
4865 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4870 /* HW cannot turn these on/off separately */
4871 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4872 ifp->if_capenable ^= IFCAP_RXCSUM;
4873 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
4875 if (mask & IFCAP_TXCSUM)
4876 ifp->if_capenable ^= IFCAP_TXCSUM;
4877 if (mask & IFCAP_TXCSUM_IPV6)
4878 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
4879 if (mask & IFCAP_TSO4)
4880 ifp->if_capenable ^= IFCAP_TSO4;
4881 if (mask & IFCAP_TSO6)
4882 ifp->if_capenable ^= IFCAP_TSO6;
4883 if (mask & IFCAP_LRO)
4884 ifp->if_capenable ^= IFCAP_LRO;
4885 if (mask & IFCAP_VLAN_HWTAGGING)
4886 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4887 if (mask & IFCAP_VLAN_HWFILTER)
4888 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
4889 if (mask & IFCAP_VLAN_HWTSO)
4890 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
/* Capability changes require a re-init to reach the hardware. */
4892 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4893 IXGBE_CORE_LOCK(adapter);
4894 ixgbe_init_locked(adapter);
4895 IXGBE_CORE_UNLOCK(adapter);
4897 VLAN_CAPABILITIES(ifp);
4900 #if __FreeBSD_version >= 1100036
4903 struct ixgbe_hw *hw = &adapter->hw;
4904 struct ifi2creq i2c;
4907 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
4908 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
/* Only the SFP EEPROM addresses (0xA0/0xA2) may be read, and the
 * requested length must fit the reply buffer. */
4911 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4915 if (i2c.len > sizeof(i2c.data)) {
4920 for (i = 0; i < i2c.len; i++)
4921 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
4922 i2c.dev_addr, &i2c.data[i]);
4923 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
4928 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
4929 error = ether_ioctl(ifp, command, data);
4936 /************************************************************************
4937 * ixgbe_check_fan_failure
4938 ************************************************************************/
/* Logs a critical message when the fan-fail GPI bit is set in 'reg';
 * the bit position differs between interrupt (EICR) and ESDP context. */
4940 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4944 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4948 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4949 } /* ixgbe_check_fan_failure */
4951 /************************************************************************
4953 ************************************************************************/
/* Taskqueue handler for deferred per-queue RX/TX processing. */
4955 ixgbe_handle_que(void *context, int pending)
4957 struct ix_queue *que = context;
4958 struct adapter *adapter = que->adapter;
4959 struct tx_ring *txr = que->txr;
4960 struct ifnet *ifp = adapter->ifp;
4962 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Drain any pending transmit work under the TX lock. */
4966 if (!ixgbe_ring_empty(ifp, txr->br))
4967 ixgbe_start_locked(ifp, txr);
4968 IXGBE_TX_UNLOCK(txr);
4971 /* Re-enable this interrupt */
/* MSI-X path re-arms just this queue's vector; otherwise the whole
 * interrupt set is re-enabled. */
4972 if (que->res != NULL)
4973 ixgbe_enable_queue(adapter, que->msix);
4975 ixgbe_enable_intr(adapter, false);
4978 } /* ixgbe_handle_que */
4982 /************************************************************************
4983 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
4984 ************************************************************************/
/* Single-vector setup path: one shared IRQ, one queue taskqueue. */
4986 ixgbe_allocate_legacy(struct adapter *adapter)
4988 device_t dev = adapter->dev;
4989 struct ix_queue *que = adapter->queues;
4990 struct tx_ring *txr = adapter->tx_rings;
4993 /* We allocate a single interrupt resource */
4994 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
4995 &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
4996 if (adapter->res == NULL) {
4998 "Unable to allocate bus resource: interrupt\n");
5003 * Try allocating a fast interrupt and the associated deferred
5004 * processing contexts.
/* Multiqueue TX needs its own deferred-start task; legacy TX does not. */
5006 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5007 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
5008 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
5009 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
5010 taskqueue_thread_enqueue, &que->tq);
5011 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
5012 device_get_nameunit(adapter->dev));
5014 if ((error = bus_setup_intr(dev, adapter->res,
5015 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
5016 &adapter->tag)) != 0) {
5018 "Failed to register fast interrupt handler: %d\n", error);
/* Failure path: tear down the taskqueue created above. */
5019 taskqueue_free(que->tq);
5024 /* For simplicity in the handlers */
5025 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
5028 } /* ixgbe_allocate_legacy */
5031 /************************************************************************
5032 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
5033 ************************************************************************/
/* Per-queue MSI-X setup: one vector per RX/TX queue pair plus one for
 * link events; optionally binds vectors/taskqueues to RSS bucket CPUs. */
5035 ixgbe_allocate_msix(struct adapter *adapter)
5037 device_t dev = adapter->dev;
5038 struct ix_queue *que = adapter->queues;
5039 struct tx_ring *txr = adapter->tx_rings;
5040 int error, rid, vector = 0;
5042 unsigned int rss_buckets = 0;
5046 * If we're doing RSS, the number of queues needs to
5047 * match the number of RSS buckets that are configured.
5049 * + If there's more queues than RSS buckets, we'll end
5050 * up with queues that get no traffic.
5052 * + If there's more RSS buckets than queues, we'll end
5053 * up having multiple RSS buckets map to the same queue,
5054 * so there'll be some contention.
5056 rss_buckets = rss_getnumbuckets();
5057 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
5058 (adapter->num_queues != rss_buckets)) {
5059 device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
5060 __func__, adapter->num_queues, rss_buckets);
/* One iteration per queue: allocate vector, hook handler, bind CPU. */
5063 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
5065 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
5066 RF_SHAREABLE | RF_ACTIVE);
5067 if (que->res == NULL) {
5068 device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
5072 /* Set the handler function */
5073 error = bus_setup_intr(dev, que->res,
5074 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
5078 device_printf(dev, "Failed to register QUE handler");
5081 #if __FreeBSD_version >= 800504
5082 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
/* Track this queue's vector for the EIMS/EICS mask handling. */
5085 adapter->active_queues |= (u64)(1 << que->msix);
5087 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
5089 * The queue ID is used as the RSS layer bucket ID.
5090 * We look up the queue ID -> RSS CPU ID and select
5093 cpu_id = rss_getcpu(i % rss_buckets);
5094 CPU_SETOF(cpu_id, &cpu_mask);
5097 * Bind the MSI-X vector, and thus the
5098 * rings to the corresponding CPU.
5100 * This just happens to match the default RSS
5101 * round-robin bucket -> queue -> CPU allocation.
5103 if (adapter->num_queues > 1)
5106 if (adapter->num_queues > 1)
5107 bus_bind_intr(dev, que->res, cpu_id);
5109 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5110 device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
5113 device_printf(dev, "Bound queue %d to cpu %d\n", i,
5115 #endif /* IXGBE_DEBUG */
5118 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5119 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
5121 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
5122 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
5123 taskqueue_thread_enqueue, &que->tq);
5124 #if __FreeBSD_version < 1100000
5125 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
5126 device_get_nameunit(adapter->dev), i);
/* Newer kernels can pin the taskqueue thread to the RSS bucket's CPU. */
5128 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5129 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5130 &cpu_mask, "%s (bucket %d)",
5131 device_get_nameunit(adapter->dev), cpu_id);
5133 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5134 NULL, "%s:q%d", device_get_nameunit(adapter->dev),
/* The final vector (one past the queue vectors) services link events. */
5140 adapter->link_rid = vector + 1;
5141 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
5142 &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
5143 if (!adapter->res) {
5145 "Unable to allocate bus resource: Link interrupt [%d]\n",
5149 /* Set the link handler function */
5150 error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
5151 NULL, ixgbe_msix_link, adapter, &adapter->tag);
5153 adapter->res = NULL;
5154 device_printf(dev, "Failed to register LINK handler");
5157 #if __FreeBSD_version >= 800504
5158 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
5160 adapter->vector = vector;
5162 } /* ixgbe_allocate_msix */
5164 /************************************************************************
5165 * ixgbe_configure_interrupts
5167 * Setup MSI-X, MSI, or legacy interrupts (in that order).
5168 * This will also depend on user settings.
5169 ************************************************************************/
5171 ixgbe_configure_interrupts(struct adapter *adapter)
5173 device_t dev = adapter->dev;
5174 int rid, want, queues, msgs;
5176 /* Default to 1 queue if MSI-X setup fails */
5177 adapter->num_queues = 1;
5179 /* Override by tuneable */
5180 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
5183 /* First try MSI-X */
5184 msgs = pci_msix_count(dev);
/* The MSI-X table BAR differs between 82598 and later MACs; probe the
 * 82598 location first, then retry 4 BARs up. */
5187 rid = PCIR_BAR(MSIX_82598_BAR);
5188 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5190 if (adapter->msix_mem == NULL) {
5191 rid += 4; /* 82599 maps in higher BAR */
5192 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
5195 if (adapter->msix_mem == NULL) {
5196 /* May not be enabled */
5197 device_printf(adapter->dev, "Unable to map MSI-X table.\n");
5201 /* Figure out a reasonable auto config value */
/* Reserve one message for the link vector (hence msgs - 1). */
5202 queues = min(mp_ncpus, msgs - 1);
5203 /* If we're doing RSS, clamp at the number of RSS buckets */
5204 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5205 queues = min(queues, rss_getnumbuckets());
5206 if (ixgbe_num_queues > queues) {
5207 device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
5208 ixgbe_num_queues = queues;
/* A nonzero tunable overrides the auto-computed queue count. */
5211 if (ixgbe_num_queues != 0)
5212 queues = ixgbe_num_queues;
5213 /* Set max queues to 8 when autoconfiguring */
5215 queues = min(queues, 8);
5217 /* reflect correct sysctl value */
5218 ixgbe_num_queues = queues;
5221 * Want one vector (RX/TX pair) per queue
5222 * plus an additional for Link.
5228 device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
/* pci_alloc_msix may grant fewer vectors; accept only an exact match. */
5232 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
5233 device_printf(adapter->dev,
5234 "Using MSI-X interrupts with %d vectors\n", msgs);
5235 adapter->num_queues = queues;
5236 adapter->feat_en |= IXGBE_FEATURE_MSIX;
5240 * MSI-X allocation failed or provided us with
5241 * less vectors than needed. Free MSI-X resources
5242 * and we'll try enabling MSI.
5244 pci_release_msi(dev);
5247 /* Without MSI-X, some features are no longer supported */
5248 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5249 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
5250 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5251 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
5253 if (adapter->msix_mem != NULL) {
5254 bus_release_resource(dev, SYS_RES_MEMORY, rid,
5256 adapter->msix_mem = NULL;
/* Fall back to plain MSI, then to a legacy INTx line as a last resort. */
5259 if (pci_alloc_msi(dev, &msgs) == 0) {
5260 adapter->feat_en |= IXGBE_FEATURE_MSI;
5261 adapter->link_rid = 1;
5262 device_printf(adapter->dev, "Using an MSI interrupt\n");
5266 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
5267 device_printf(adapter->dev,
5268 "Device does not support legacy interrupts.\n");
5272 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
5273 adapter->link_rid = 0;
5274 device_printf(adapter->dev, "Using a Legacy interrupt\n");
5277 } /* ixgbe_configure_interrupts */
5280 /************************************************************************
5281 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
5283 * Done outside of interrupt context since the driver might sleep
5284 ************************************************************************/
5286 ixgbe_handle_link(void *context)
5288 struct adapter *adapter = context;
5289 struct ixgbe_hw *hw = &adapter->hw;
/* Refresh cached link speed/state from the hardware. */
5291 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
5293 /* Re-enable link interrupts */
5294 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
5295 } /* ixgbe_handle_link */
5297 /************************************************************************
5298 * ixgbe_rearm_queues
5299 ************************************************************************/
/* Software-trigger interrupts for the given queue bitmask via EICS.
 * 82598 has one 32-bit cause-set register; later MACs split the 64-bit
 * queue mask across EICS_EX(0)/EICS_EX(1). */
5301 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5305 switch (adapter->hw.mac.type) {
5306 case ixgbe_mac_82598EB:
5307 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5308 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5310 case ixgbe_mac_82599EB:
5311 case ixgbe_mac_X540:
5312 case ixgbe_mac_X550:
5313 case ixgbe_mac_X550EM_x:
5314 case ixgbe_mac_X550EM_a:
5315 mask = (queues & 0xFFFFFFFF);
5316 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5317 mask = (queues >> 32);
5318 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5323 } /* ixgbe_rearm_queues */