1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #include "ixgbe_type.h"
36 #include "ixgbe_api.h"
37 #include "ixgbe_common.h"
38 #include "ixgbe_phy.h"
40 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
41 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
42 ixgbe_link_speed *speed,
44 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
45 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
46 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
47 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
48 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
49 ixgbe_link_speed speed, bool autoneg,
50 bool autoneg_wait_to_complete);
51 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
52 ixgbe_link_speed speed, bool autoneg,
53 bool autoneg_wait_to_complete);
54 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
55 bool autoneg_wait_to_complete);
56 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
57 ixgbe_link_speed speed,
59 bool autoneg_wait_to_complete);
60 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
61 ixgbe_link_speed speed,
63 bool autoneg_wait_to_complete);
64 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
65 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
66 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
67 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
68 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
69 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
70 void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw);
71 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
72 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
73 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
74 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
75 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps);
76 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
/**
 * ixgbe_init_mac_link_ops_82599 - Initialize MAC link function pointers
 * @hw: pointer to hardware structure
 *
 * Selects the Tx-laser control handlers and the setup_link handler based
 * on the detected media type, the multispeed-fiber capability flag, and
 * the SmartSpeed configuration.
 */
78 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
80 struct ixgbe_mac_info *mac = &hw->mac;
82 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
84 /* enable the laser control functions for SFP+ fiber */
85 if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
86 mac->ops.disable_tx_laser =
87 &ixgbe_disable_tx_laser_multispeed_fiber;
88 mac->ops.enable_tx_laser =
89 &ixgbe_enable_tx_laser_multispeed_fiber;
90 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
/* Non-fiber media: laser control does not apply, clear the hooks */
93 mac->ops.disable_tx_laser = NULL;
94 mac->ops.enable_tx_laser = NULL;
95 mac->ops.flap_tx_laser = NULL;
98 if (hw->phy.multispeed_fiber) {
99 /* Set up dual speed SFP+ support */
100 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
/* SmartSpeed only applies on backplane links when enabled/auto */
102 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
103 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
104 hw->phy.smart_speed == ixgbe_smart_speed_on)) {
105 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
/* Default: plain 82599 MAC link setup */
107 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
113 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
114 * @hw: pointer to hardware structure
116 * Initialize any function pointers that were not able to be
117 * set during init_shared_code because the PHY/SFP type was
118 * not known. Perform the SFP init if necessary.
121 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
123 struct ixgbe_mac_info *mac = &hw->mac;
124 struct ixgbe_phy_info *phy = &hw->phy;
125 s32 ret_val = IXGBE_SUCCESS;
127 DEBUGFUNC("ixgbe_init_phy_ops_82599");
129 /* Identify the PHY or SFP module */
130 ret_val = phy->ops.identify(hw);
/* An unsupported SFP module aborts init without installing link ops */
131 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
132 goto init_phy_ops_out;
134 /* Setup function pointers based on detected SFP module and speeds */
135 ixgbe_init_mac_link_ops_82599(hw);
/* A recognized SFP module needs no PHY reset */
136 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
137 hw->phy.ops.reset = NULL;
139 /* If copper media, overwrite with copper function pointers */
140 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
141 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
142 mac->ops.get_link_capabilities =
143 &ixgbe_get_copper_link_capabilities_generic;
146 /* Set necessary function pointers based on phy type */
147 switch (hw->phy.type) {
/* TNX PHY: use the TNX-specific link and firmware-version handlers */
149 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
150 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
151 phy->ops.get_firmware_version =
152 &ixgbe_get_phy_firmware_version_tnx;
/* Other PHY types fall back to the generic firmware-version query */
155 phy->ops.get_firmware_version =
156 &ixgbe_get_phy_firmware_version_generic;
/**
 * ixgbe_setup_sfp_modules_82599 - Setup SFP module
 * @hw: pointer to hardware structure
 *
 * For a recognized SFP module, streams the EEPROM-provided init sequence
 * into the CORECTL register, then restarts the DSP and waits for
 * autonegotiation to leave state 0 before returning to 10G SFI mode.
 */
165 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
167 s32 ret_val = IXGBE_SUCCESS;
170 u16 list_offset, data_offset, data_value;
172 DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
174 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
175 ixgbe_init_mac_link_ops_82599(hw);
/* Known SFP module: PHY reset is not required */
177 hw->phy.ops.reset = NULL;
/* Locate the per-SFP init word list in the EEPROM */
179 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
181 if (ret_val != IXGBE_SUCCESS)
184 /* PHY config will finish before releasing the semaphore */
185 ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
186 if (ret_val != IXGBE_SUCCESS) {
187 ret_val = IXGBE_ERR_SWFW_SYNC;
/* Stream EEPROM words into CORECTL until the 0xffff terminator */
191 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
192 while (data_value != 0xffff) {
193 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
194 IXGBE_WRITE_FLUSH(hw);
195 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
198 /* Release the semaphore */
199 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
200 /* Delay obtaining semaphore again to allow FW access */
201 msec_delay(hw->eeprom.semaphore_delay);
203 /* Now restart DSP by setting Restart_AN and clearing LMS */
204 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
205 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
206 IXGBE_AUTOC_AN_RESTART));
208 /* Wait for AN to leave state 0 */
209 for (i = 0; i < 10; i++) {
211 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
212 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
/* AN never left state 0 — SFP setup did not complete */
215 if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
216 DEBUGOUT("sfp module setup not complete\n");
217 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
221 /* Restart DSP by setting Restart_AN and return to SFI mode */
222 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
223 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
224 IXGBE_AUTOC_AN_RESTART));
232 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
233 * @hw: pointer to hardware structure
235 * Initialize the function pointers and assign the MAC type for 82599.
236 * Does not touch the hardware.
239 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
241 struct ixgbe_mac_info *mac = &hw->mac;
242 struct ixgbe_phy_info *phy = &hw->phy;
245 DEBUGFUNC("ixgbe_init_ops_82599");
/* Start from the generic PHY/MAC ops, then override 82599 specifics */
247 ret_val = ixgbe_init_phy_ops_generic(hw);
248 ret_val = ixgbe_init_ops_generic(hw);
/* PHY */
251 phy->ops.identify = &ixgbe_identify_phy_82599;
252 phy->ops.init = &ixgbe_init_phy_ops_82599;
/* MAC */
255 mac->ops.reset_hw = &ixgbe_reset_hw_82599;
256 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82599;
257 mac->ops.get_media_type = &ixgbe_get_media_type_82599;
258 mac->ops.get_supported_physical_layer =
259 &ixgbe_get_supported_physical_layer_82599;
260 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
261 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
262 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
263 mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
264 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
265 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
266 mac->ops.get_device_caps = &ixgbe_get_device_caps_82599;
267 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
268 mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
270 /* RAR, Multicast, VLAN */
271 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
272 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
273 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
274 mac->rar_highwater = 1;
275 mac->ops.set_vfta = &ixgbe_set_vfta_generic;
276 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
277 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
278 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
279 mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
280 mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
/* Link */
283 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
284 mac->ops.check_link = &ixgbe_check_mac_link_generic;
285 ixgbe_init_mac_link_ops_82599(hw);
/* 82599 hardware capacities (table sizes, queue counts) */
287 mac->mcft_size = 128;
289 mac->num_rar_entries = 128;
290 mac->rx_pb_size = 512;
291 mac->max_tx_queues = 128;
292 mac->max_rx_queues = 128;
293 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/* PF mailbox for SR-IOV communication with VFs */
295 hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
301 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
302 * @hw: pointer to hardware structure
303 * @speed: pointer to link speed
304 * @negotiation: TRUE when autoneg or autotry is enabled
306 * Determines the link capabilities by reading the AUTOC register.
308 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
309 ixgbe_link_speed *speed,
312 s32 status = IXGBE_SUCCESS;
315 DEBUGFUNC("ixgbe_get_link_capabilities_82599");
318 /* Check if 1G SFP module. */
319 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
320 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
321 *speed = IXGBE_LINK_SPEED_1GB_FULL;
327 * Determine link capabilities based on the stored value of AUTOC,
328 * which represents EEPROM defaults. If AUTOC value has not
329 * been stored, use the current register values.
331 if (hw->mac.orig_link_settings_stored)
332 autoc = hw->mac.orig_autoc;
334 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
/* Decode the Link Mode Select field into speed/negotiation outputs */
336 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
337 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
338 *speed = IXGBE_LINK_SPEED_1GB_FULL;
339 *negotiation = FALSE;
342 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
343 *speed = IXGBE_LINK_SPEED_10GB_FULL;
344 *negotiation = FALSE;
347 case IXGBE_AUTOC_LMS_1G_AN:
348 *speed = IXGBE_LINK_SPEED_1GB_FULL;
352 case IXGBE_AUTOC_LMS_10G_SERIAL:
353 *speed = IXGBE_LINK_SPEED_10GB_FULL;
354 *negotiation = FALSE;
357 case IXGBE_AUTOC_LMS_KX4_KX_KR:
358 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
/* Backplane AN: accumulate speeds from the KR/KX4/KX support bits */
359 *speed = IXGBE_LINK_SPEED_UNKNOWN;
360 if (autoc & IXGBE_AUTOC_KR_SUPP)
361 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
362 if (autoc & IXGBE_AUTOC_KX4_SUPP)
363 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
364 if (autoc & IXGBE_AUTOC_KX_SUPP)
365 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
369 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
370 *speed = IXGBE_LINK_SPEED_100_FULL;
371 if (autoc & IXGBE_AUTOC_KR_SUPP)
372 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
373 if (autoc & IXGBE_AUTOC_KX4_SUPP)
374 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
375 if (autoc & IXGBE_AUTOC_KX_SUPP)
376 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
380 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
381 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
382 *negotiation = FALSE;
/* Unrecognized LMS value: report a link setup error */
386 status = IXGBE_ERR_LINK_SETUP;
/* Multispeed fiber modules can always attempt both 10G and 1G */
391 if (hw->phy.multispeed_fiber) {
392 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
393 IXGBE_LINK_SPEED_1GB_FULL;
402 * ixgbe_get_media_type_82599 - Get media type
403 * @hw: pointer to hardware structure
405 * Returns the media type (fiber, copper, backplane)
407 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
409 enum ixgbe_media_type media_type;
411 DEBUGFUNC("ixgbe_get_media_type_82599");
413 /* Detect if there is a copper PHY attached. */
414 switch (hw->phy.type) {
415 case ixgbe_phy_cu_unknown:
418 media_type = ixgbe_media_type_copper;
/* No copper PHY detected: classify by PCI device ID instead */
424 switch (hw->device_id) {
425 case IXGBE_DEV_ID_82599_KX4:
426 case IXGBE_DEV_ID_82599_KX4_MEZZ:
427 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
428 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
429 case IXGBE_DEV_ID_82599_XAUI_LOM:
430 /* Default device ID is mezzanine card KX/KX4 */
431 media_type = ixgbe_media_type_backplane;
433 case IXGBE_DEV_ID_82599_SFP:
434 case IXGBE_DEV_ID_82599_SFP_FCOE:
435 media_type = ixgbe_media_type_fiber;
437 case IXGBE_DEV_ID_82599_CX4:
438 media_type = ixgbe_media_type_cx4;
440 case IXGBE_DEV_ID_82599_T3_LOM:
441 media_type = ixgbe_media_type_copper;
/* Any other device ID is reported as unknown media */
444 media_type = ixgbe_media_type_unknown;
452 * ixgbe_start_mac_link_82599 - Setup MAC link settings
453 * @hw: pointer to hardware structure
454 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
456 * Configures link settings based on values in the ixgbe_hw struct.
457 * Restarts the link. Performs autonegotiation if needed.
459 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
460 bool autoneg_wait_to_complete)
465 s32 status = IXGBE_SUCCESS;
467 DEBUGFUNC("ixgbe_start_mac_link_82599");
/* Kick off (re)negotiation by setting Restart_AN in AUTOC */
471 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
472 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
473 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
475 /* Only poll for autoneg to complete if specified to do so */
476 if (autoneg_wait_to_complete) {
/* Polling applies only to the backplane KR/KX4/KX link modes */
477 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
478 IXGBE_AUTOC_LMS_KX4_KX_KR ||
479 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
480 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
481 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
482 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
483 links_reg = 0; /* Just in case Autoneg time = 0 */
484 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
485 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
486 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
/* Timed out without KX_AN_COMP — report incomplete autoneg */
490 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
491 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
492 DEBUGOUT("Autoneg did not complete.\n");
497 /* Add delay to filter out noises during initial link setup */
504 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
505 * @hw: pointer to hardware structure
507 * The base drivers may require better control over SFP+ module
508 * PHY states. This includes selectively shutting down the Tx
509 * laser on the PHY, effectively halting physical link.
511 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
513 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
515 /* Disable tx laser; allow 100us to go dark per spec */
/* Driving ESDP SDP3 high turns the transmit laser off */
516 esdp_reg |= IXGBE_ESDP_SDP3;
517 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
518 IXGBE_WRITE_FLUSH(hw);
523 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
524 * @hw: pointer to hardware structure
526 * The base drivers may require better control over SFP+ module
527 * PHY states. This includes selectively turning on the Tx
528 * laser on the PHY, effectively starting physical link.
530 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
532 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
534 /* Enable tx laser; allow 100ms to light up */
/* Clearing ESDP SDP3 turns the transmit laser back on */
535 esdp_reg &= ~IXGBE_ESDP_SDP3;
536 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
537 IXGBE_WRITE_FLUSH(hw);
542 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
543 * @hw: pointer to hardware structure
545 * When the driver changes the link speeds that it can support,
546 * it sets autotry_restart to TRUE to indicate that we need to
547 * initiate a new autotry session with the link partner. To do
548 * so, we set the speed then disable and re-enable the tx laser, to
549 * alert the link partner that it also needs to restart autotry on its
550 * end. This is consistent with TRUE clause 37 autoneg, which also
551 * involves a loss of signal.
553 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
555 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
/* Only flap once per requested autotry restart */
557 if (hw->mac.autotry_restart) {
558 ixgbe_disable_tx_laser_multispeed_fiber(hw);
559 ixgbe_enable_tx_laser_multispeed_fiber(hw);
560 hw->mac.autotry_restart = FALSE;
565 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
566 * @hw: pointer to hardware structure
567 * @speed: new link speed
568 * @autoneg: TRUE if autonegotiation enabled
569 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
571 * Set the link speed in the AUTOC register and restarts link.
573 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
574 ixgbe_link_speed speed, bool autoneg,
575 bool autoneg_wait_to_complete)
577 s32 status = IXGBE_SUCCESS;
578 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
579 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
581 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
583 bool link_up = FALSE;
586 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
588 /* Mask off requested but non-supported speeds */
589 status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
590 if (status != IXGBE_SUCCESS)
596 * Try each speed one by one, highest priority first. We do this in
597 * software because 10gb fiber doesn't support speed autonegotiation.
599 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
601 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
603 /* If we already have link at this speed, just jump out */
604 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
605 if (status != IXGBE_SUCCESS)
608 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
611 /* Set the module link speed */
/* SDP5 high selects the module's 10G rate */
612 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
613 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
614 IXGBE_WRITE_FLUSH(hw);
616 /* Allow module to change analog characteristics (1G->10G) */
619 status = ixgbe_setup_mac_link_82599(hw,
620 IXGBE_LINK_SPEED_10GB_FULL,
622 autoneg_wait_to_complete);
623 if (status != IXGBE_SUCCESS)
626 /* Flap the tx laser if it has not already been done */
627 ixgbe_flap_tx_laser(hw);
630 * Wait for the controller to acquire link. Per IEEE 802.3ap,
631 * Section 73.10.2, we may have to wait up to 500ms if KR is
632 * attempted. 82599 uses the same timing for 10g SFI.
634 for (i = 0; i < 5; i++) {
635 /* Wait for the link partner to also set speed */
638 /* If we have link, just jump out */
639 status = ixgbe_check_link(hw, &link_speed,
641 if (status != IXGBE_SUCCESS)
/* 10G failed (or was not requested) — fall back to trying 1G */
649 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
651 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
652 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
654 /* If we already have link at this speed, just jump out */
655 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
656 if (status != IXGBE_SUCCESS)
659 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
662 /* Set the module link speed */
/* SDP5 low selects the module's 1G rate */
663 esdp_reg &= ~IXGBE_ESDP_SDP5;
664 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
665 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
666 IXGBE_WRITE_FLUSH(hw);
668 /* Allow module to change analog characteristics (10G->1G) */
671 status = ixgbe_setup_mac_link_82599(hw,
672 IXGBE_LINK_SPEED_1GB_FULL,
674 autoneg_wait_to_complete);
675 if (status != IXGBE_SUCCESS)
678 /* Flap the tx laser if it has not already been done */
679 ixgbe_flap_tx_laser(hw);
681 /* Wait for the link partner to also set speed */
684 /* If we have link, just jump out */
685 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
686 if (status != IXGBE_SUCCESS)
694 * We didn't get link. Configure back to the highest speed we tried,
695 * (if there was more than one). We call ourselves back with just the
696 * single highest speed that the user requested.
699 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
700 highest_link_speed, autoneg, autoneg_wait_to_complete);
703 /* Set autoneg_advertised value based on input link speed */
704 hw->phy.autoneg_advertised = 0;
706 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
707 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
709 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
710 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
716 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
717 * @hw: pointer to hardware structure
718 * @speed: new link speed
719 * @autoneg: TRUE if autonegotiation enabled
720 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
722 * Implements the Intel SmartSpeed algorithm.
724 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
725 ixgbe_link_speed speed, bool autoneg,
726 bool autoneg_wait_to_complete)
728 s32 status = IXGBE_SUCCESS;
729 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
731 bool link_up = FALSE;
732 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
734 DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
736 /* Set autoneg_advertised value based on input link speed */
737 hw->phy.autoneg_advertised = 0;
739 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
740 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
742 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
743 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
745 if (speed & IXGBE_LINK_SPEED_100_FULL)
746 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
749 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
750 * autoneg advertisement if link is unable to be established at the
751 * highest negotiated rate. This can sometimes happen due to integrity
752 * issues with the physical media connection.
755 /* First, try to get link with full advertisement */
756 hw->phy.smart_speed_active = FALSE;
757 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
758 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
759 autoneg_wait_to_complete);
760 if (status != IXGBE_SUCCESS)
764 * Wait for the controller to acquire link. Per IEEE 802.3ap,
765 * Section 73.10.2, we may have to wait up to 500ms if KR is
766 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
767 * Table 9 in the AN MAS.
769 for (i = 0; i < 5; i++) {
772 /* If we have link, just jump out */
773 status = ixgbe_check_link(hw, &link_speed, &link_up,
775 if (status != IXGBE_SUCCESS)
784 * We didn't get link. If we advertised KR plus one of KX4/KX
785 * (or BX4/BX), then disable KR and try again.
787 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
788 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
791 /* Turn SmartSpeed on to disable KR support */
792 hw->phy.smart_speed_active = TRUE;
793 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
794 autoneg_wait_to_complete);
795 if (status != IXGBE_SUCCESS)
799 * Wait for the controller to acquire link. 600ms will allow for
800 * the AN link_fail_inhibit_timer as well for multiple cycles of
801 * parallel detect, both 10g and 1g. This allows for the maximum
802 * connect attempts as defined in the AN MAS table 73-7.
804 for (i = 0; i < 6; i++) {
807 /* If we have link, just jump out */
808 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
809 if (status != IXGBE_SUCCESS)
816 /* We didn't get link. Turn SmartSpeed back off. */
817 hw->phy.smart_speed_active = FALSE;
818 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
819 autoneg_wait_to_complete);
/* Note when SmartSpeed settled on a lower-than-advertised rate */
822 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
823 DEBUGOUT("Smartspeed has downgraded the link speed "
824 "from the maximum advertised\n");
829 * ixgbe_setup_mac_link_82599 - Set MAC link speed
830 * @hw: pointer to hardware structure
831 * @speed: new link speed
832 * @autoneg: TRUE if autonegotiation enabled
833 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
835 * Set the link speed in the AUTOC register and restarts link.
837 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
838 ixgbe_link_speed speed, bool autoneg,
839 bool autoneg_wait_to_complete)
841 s32 status = IXGBE_SUCCESS;
842 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
843 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
844 u32 start_autoc = autoc;
846 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
847 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
848 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
851 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
853 DEBUGFUNC("ixgbe_setup_mac_link_82599");
855 /* Check to see if speed passed in is supported. */
856 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
857 if (status != IXGBE_SUCCESS)
860 speed &= link_capabilities;
/* Nothing left after masking: the request is unsatisfiable */
862 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
863 status = IXGBE_ERR_LINK_SETUP;
867 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
868 if (hw->mac.orig_link_settings_stored)
869 orig_autoc = hw->mac.orig_autoc;
873 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
874 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
875 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
876 /* Set KX4/KX/KR support according to speed requested */
877 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
878 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
879 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
880 autoc |= IXGBE_AUTOC_KX4_SUPP;
/* KR is advertised only when SmartSpeed has not disabled it */
881 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
882 (hw->phy.smart_speed_active == FALSE))
883 autoc |= IXGBE_AUTOC_KR_SUPP;
884 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
885 autoc |= IXGBE_AUTOC_KX_SUPP;
886 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
887 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
888 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
889 /* Switch from 1G SFI to 10G SFI if requested */
890 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
891 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
892 autoc &= ~IXGBE_AUTOC_LMS_MASK;
893 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
895 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
896 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
897 /* Switch from 10G SFI to 1G SFI if requested */
898 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
899 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
900 autoc &= ~IXGBE_AUTOC_LMS_MASK;
902 autoc |= IXGBE_AUTOC_LMS_1G_AN;
904 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
/* Write AUTOC and restart AN only if the value actually changed */
908 if (autoc != start_autoc) {
910 autoc |= IXGBE_AUTOC_AN_RESTART;
911 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
913 /* Only poll for autoneg to complete if specified to do so */
914 if (autoneg_wait_to_complete) {
915 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
916 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
917 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
918 links_reg = 0; /*Just in case Autoneg time=0*/
919 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
921 IXGBE_READ_REG(hw, IXGBE_LINKS);
922 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
926 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
928 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
929 DEBUGOUT("Autoneg did not complete.\n");
934 /* Add delay to filter out noises during initial link setup */
943 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
944 * @hw: pointer to hardware structure
945 * @speed: new link speed
946 * @autoneg: TRUE if autonegotiation enabled
947 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
949 * Restarts link on PHY and MAC based on settings passed in.
951 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
952 ixgbe_link_speed speed,
954 bool autoneg_wait_to_complete)
958 DEBUGFUNC("ixgbe_setup_copper_link_82599");
960 /* Setup the PHY according to input speed */
961 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
962 autoneg_wait_to_complete);
/* Then bring up the MAC side of the link to match */
964 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
970 * ixgbe_reset_hw_82599 - Perform hardware reset
971 * @hw: pointer to hardware structure
973 * Resets the hardware by resetting the transmit and receive units, masks
974 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
977 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
979 s32 status = IXGBE_SUCCESS;
985 DEBUGFUNC("ixgbe_reset_hw_82599");
987 /* Call adapter stop to disable tx/rx and clear interrupts */
988 hw->mac.ops.stop_adapter(hw);
990 /* PHY ops must be identified and initialized prior to reset */
992 /* Identify PHY and related function pointers */
993 status = hw->phy.ops.init(hw);
995 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
998 /* Setup SFP module if there is one present. */
999 if (hw->phy.sfp_setup_needed) {
1000 status = hw->mac.ops.setup_sfp(hw);
1001 hw->phy.sfp_setup_needed = FALSE;
1004 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
/* Reset the PHY unless it is external or reset is disabled */
1008 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
1009 hw->phy.ops.reset(hw);
1012 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
1013 * access and verify no pending requests before reset
1015 ixgbe_disable_pcie_master(hw);
1019 * Issue global reset to the MAC. This needs to be a SW reset.
1020 * If link reset is used, it might reset the MAC when mng is using it
1022 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1023 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
1024 IXGBE_WRITE_FLUSH(hw);
1026 /* Poll for reset bit to self-clear indicating reset is complete */
1027 for (i = 0; i < 10; i++) {
1029 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1030 if (!(ctrl & IXGBE_CTRL_RST))
/* RST still set after polling — the reset did not complete */
1033 if (ctrl & IXGBE_CTRL_RST) {
1034 status = IXGBE_ERR_RESET_FAILED;
1035 DEBUGOUT("Reset polling failed to complete.\n");
1039 * Double resets are required for recovery from certain error
1040 * conditions. Between resets, it is necessary to stall to allow time
1041 * for any pending HW events to complete. We use 1usec since that is
1042 * what is needed for ixgbe_disable_pcie_master(). The second reset
1043 * then clears out any effects of those events.
1045 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1046 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1054 * Store the original AUTOC/AUTOC2 values if they have not been
1055 * stored off yet. Otherwise restore the stored original
1056 * values since the reset operation sets back to defaults.
1058 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1059 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1060 if (hw->mac.orig_link_settings_stored == FALSE) {
1061 hw->mac.orig_autoc = autoc;
1062 hw->mac.orig_autoc2 = autoc2;
1063 hw->mac.orig_link_settings_stored = TRUE;
/* Re-apply saved link settings clobbered by the reset */
1065 if (autoc != hw->mac.orig_autoc)
1066 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
1067 IXGBE_AUTOC_AN_RESTART));
1069 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1070 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1071 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1072 autoc2 |= (hw->mac.orig_autoc2 &
1073 IXGBE_AUTOC2_UPPER_MASK);
1074 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1078 /* Store the permanent mac address */
1079 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1082 * Store MAC address from RAR0, clear receive address registers, and
1083 * clear the multicast table. Also reset num_rar_entries to 128,
1084 * since we modify this value when programming the SAN MAC address.
1086 hw->mac.num_rar_entries = 128;
1087 hw->mac.ops.init_rx_addrs(hw);
1089 /* Store the permanent SAN mac address */
1090 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1092 /* Add the SAN MAC address to the RAR only if it's a valid address */
1093 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1094 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1095 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1097 /* Reserve the last RAR for the SAN MAC address */
1098 hw->mac.num_rar_entries--;
1101 /* Store the alternative WWNN/WWPN prefix */
1102 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1103 &hw->mac.wwpn_prefix);
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 *
 * Return: IXGBE_SUCCESS on success; IXGBE_ERR_FDIR_REINIT_FAILED when either
 * a previous FDIRCMD command never completed or INIT_DONE never asserted
 * after the table re-initialization.
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
	/* clear INIT_DONE in our cached value so the final write lets us
	 * poll for the hardware to set it again */
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
		      IXGBE_FDIRCMD_CMD_MASK))
	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
		DEBUGOUT("Flow Director previous command isn't complete, "
		         "aborting table re-initialization. \n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	/* clear the free-filter register before restarting init */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
	                (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
	                 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
	                (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
	                 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);
	/* re-arm FDIRCTRL (INIT_DONE cleared above) to restart table init */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
	return IXGBE_SUCCESS;
 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 * @hw: pointer to hardware structure
 * @pballoc: which mode to allocate filters with
 *
 * Return: IXGBE_SUCCESS, or IXGBE_ERR_CONFIG when @pballoc is not one of the
 * supported IXGBE_FDIR_PBALLOC_* values.
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
	 * Before enabling Flow Director, the Rx Packet Buffer size
	 * must be reduced. The new value is the current size minus
	 * flow director memory usage size.
	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	for (i = 1; i < 8; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
	/* Send interrupt when 64 filters are left */
	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
	/* Set the maximum length per hash bucket to 0xA filters */
	fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
	/* select the filter table size based on the packet-buffer allocation */
	case IXGBE_FDIR_PBALLOC_64K:
		/* 8k - 1 signature filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
	case IXGBE_FDIR_PBALLOC_128K:
		/* 16k - 1 signature filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
	case IXGBE_FDIR_PBALLOC_256K:
		/* 32k - 1 signature filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
		/* unrecognized pballoc: bail out before touching more registers */
		return IXGBE_ERR_CONFIG;
	/* Move the flexible bytes to use the ethertype - shift 6 words */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
	 * Poll init-done after we write the register. Estimated times:
	 * 10G: PBALLOC = 11b, timing is 60us
	 * 1G: PBALLOC = 11b, timing is 600us
	 * 100M: PBALLOC = 11b, timing is 6ms
	 * Multiply these timings by 4 if under full Rx load
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time. If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
	return IXGBE_SUCCESS;
 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 * @hw: pointer to hardware structure
 * @pballoc: which mode to allocate filters with
 *
 * Return: IXGBE_SUCCESS, or IXGBE_ERR_CONFIG when @pballoc is not one of the
 * supported IXGBE_FDIR_PBALLOC_* values.
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
	 * Before enabling Flow Director, the Rx Packet Buffer size
	 * must be reduced. The new value is the current size minus
	 * flow director memory usage size.
	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	for (i = 1; i < 8; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
	/* Send interrupt when 64 filters are left */
	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
	/* Initialize the drop queue to Rx queue 127 */
	fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
	/* select the filter table size based on the packet-buffer allocation */
	case IXGBE_FDIR_PBALLOC_64K:
		/* 2k - 1 perfect filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
	case IXGBE_FDIR_PBALLOC_128K:
		/* 4k - 1 perfect filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
	case IXGBE_FDIR_PBALLOC_256K:
		/* 8k - 1 perfect filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
		/* unrecognized pballoc: bail out before touching more registers */
		return IXGBE_ERR_CONFIG;
	/* Turn perfect match filtering on */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
	/* Move the flexible bytes to use the ethertype - shift 6 words */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
	 * Poll init-done after we write the register. Estimated times:
	 * 10G: PBALLOC = 11b, timing is 60us
	 * 1G: PBALLOC = 11b, timing is 600us
	 * 100M: PBALLOC = 11b, timing is 6ms
	 * Multiply these timings by 4 if under full Rx load
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time. If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	/* Set the maximum length per hash bucket to 0xA filters */
	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director Perfect poll time exceeded!\n");
	return IXGBE_SUCCESS;
 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
 * @atr_input: input bitstream to compute the hash on
 * @key: 32-bit hash key
u16 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
	 * The algorithm is as follows:
	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
	 *    and A[n] x B[n] is bitwise AND between same length strings
	 *    K[n] is 16 bits, defined as:
	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
	 *       for n modulo 32 < 15, K[n] =
	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
	 *    S[n] is 16 bits, defined as:
	 *       for n >= 15, S[n] = S[n:n - 15]
	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
	 *    To simplify for programming, the algorithm is implemented
	 *    in software this way:
	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
	 *    for (i = 0; i < 352; i+=32)
	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
	 *    hash[15:0] ^= Stream[15:0];
	 *    for (i = 0; i < 16; i++) {
	 *        hash[15:0] ^= lo_hash_dword[(i+15):i];
	 *        hash[15:0] ^= hi_hash_dword[(i+15):i];
	/* XOR of be32 words accumulated in wire order, converted once below */
	__be32 common_hash_dword = 0;
	u32 hi_hash_dword, lo_hash_dword;
	u16 hash_result = 0;
	 * the hi_hash_dword starts with vlan_id, the lo_hash_dword starts
	 * and ends with it, the vlan at the end is added via the word swapped
	 * xor with the hi_hash_dword a few lines down.
	hi_hash_dword = IXGBE_NTOHL(atr_input->dword_stream[0]) & 0x0000FFFF;
	lo_hash_dword = hi_hash_dword;
	/* generate common hash dword by folding dwords 1 through 10 */
	for (i = 1; i < 11; i++)
		common_hash_dword ^= (u32)atr_input->dword_stream[i];
	hi_hash_dword ^= IXGBE_NTOHL(common_hash_dword);
	/* low dword is word swapped version of common with vlan added */
	lo_hash_dword ^= (hi_hash_dword >> 16) | (hi_hash_dword << 16);
	/* hi dword is common dword with l4type and vm_pool shifted */
	hi_hash_dword ^= IXGBE_NTOHL(atr_input->dword_stream[10]) << 16;
	 * Process all 32 bits of the 2 keys 2 bits at a time
	 * Bit flip vlan from hash result if hash key has bit 0 set, the
	 * reason for doing this is because the hash generation shouldn't
	 * start until bit 1 in the stream so we need to cancel out a vlan
	 * if it was added starting at bit 0.
	hash_result ^= IXGBE_NTOHL(atr_input->dword_stream[0]) &
	hash_result ^= lo_hash_dword;
	if (key & 0x00010000)
		hash_result ^= hi_hash_dword;
	/* process the remaining bits in the key */
	for (i = 1; i < 16; i++) {
		if (key & (0x0001 << i))
			hash_result ^= lo_hash_dword >> i;
		if (key & (0x00010000 << i))
			hash_result ^= hi_hash_dword >> i;
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
/* the common key is the overlap of the bucket and signature keys; a bit set
 * here contributes to both hashes in a single XOR */
#define IXGBE_ATR_COMMON_HASH_KEY \
	(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
/* one unrolled hash iteration: key bit _n (low half) and key bit _n+16
 * (high half) select whether the shifted hash dwords are folded into the
 * common, bucket, or signature hash accumulators */
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) { \
		              IXGBE_NTOHL(atr_input->dword_stream[0]) & \
		common_hash ^= lo_hash_dword >> n; \
	} else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) { \
		              IXGBE_NTOHL(atr_input->dword_stream[0]) & \
		bucket_hash ^= lo_hash_dword >> n; \
	} else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) { \
		sig_hash ^= IXGBE_NTOHL(atr_input->dword_stream[0]) & \
		sig_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x010000 << n)) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x010000 << n)) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x010000 << n)) \
		sig_hash ^= hi_hash_dword >> n; \
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @atr_input: input bitstream to compute the hash on
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
 *
 * Return: signature hash in the upper 16 bits, masked bucket hash in the
 * lower 16 bits.
static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_input *atr_input)
	u32 hi_hash_dword, lo_hash_dword;
	u16 sig_hash = 0, bucket_hash = 0, common_hash = 0;
	 * the hi_hash_dword starts with vlan_id, the lo_hash_dword starts
	 * and ends with it, the vlan at the end is added via the word swapped
	 * xor with the hi_hash_dword a few lines down. The part masked off
	 * is the part of the hash reserved to 0.
	hi_hash_dword = IXGBE_NTOHL(atr_input->dword_stream[0]) & 0x0000FFFF;
	lo_hash_dword = hi_hash_dword;
	/* generate common hash dword */
	hi_hash_dword ^= IXGBE_NTOHL(atr_input->dword_stream[1] ^
	                             atr_input->dword_stream[2] ^
	                             atr_input->dword_stream[3] ^
	                             atr_input->dword_stream[4] ^
	                             atr_input->dword_stream[5] ^
	                             atr_input->dword_stream[6] ^
	                             atr_input->dword_stream[7] ^
	                             atr_input->dword_stream[8] ^
	                             atr_input->dword_stream[9] ^
	                             atr_input->dword_stream[10]);
	/* low dword is word swapped version of common */
	lo_hash_dword ^= (hi_hash_dword >> 16) | (hi_hash_dword << 16);
	/* hi dword is common dword with l4type and vm_pool added */
	hi_hash_dword ^= IXGBE_NTOHL(atr_input->dword_stream[10]) << 16;
	 * Process all 32 bits of the 2 keys 2 bits at a time
	 * Bit flip vlan from hash result if hash key has bit 0 set, the
	 * reason for doing this is because the hash generation shouldn't
	 * start until bit 1 in the stream so we need to cancel out a vlan
	 * if it was added starting at bit 0.
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
	/* combine common_hash result with signature and bucket hashes */
	sig_hash ^= common_hash;
	bucket_hash ^= common_hash;
	/* return completed signature hash */
	return ((u32)sig_hash << 16) | (bucket_hash & IXGBE_ATR_HASH_MASK);
 * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
 * @input: input stream to modify
 * @vlan: the VLAN id to load (big-endian)
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_set_vlan_id_82599(union ixgbe_atr_input *input, __be16 vlan)
	DEBUGFUNC("ixgbe_atr_set_vlan_id_82599");
	input->formatted.vlan_id = vlan;
	return IXGBE_SUCCESS;
 * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
 * @input: input stream to modify
 * @src_addr: the IP address to load (big-endian)
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_set_src_ipv4_82599(union ixgbe_atr_input *input, __be32 src_addr)
	DEBUGFUNC("ixgbe_atr_set_src_ipv4_82599");
	input->formatted.src_ip[0] = src_addr;
	return IXGBE_SUCCESS;
 * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
 * @input: input stream to modify
 * @dst_addr: the IP address to load (big-endian)
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_set_dst_ipv4_82599(union ixgbe_atr_input *input, __be32 dst_addr)
	DEBUGFUNC("ixgbe_atr_set_dst_ipv4_82599");
	input->formatted.dst_ip[0] = dst_addr;
	return IXGBE_SUCCESS;
1626 * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
1627 * @input: input stream to modify
1628 * @src_addr_0: the first 4 bytes of the IP address to load
1629 * @src_addr_1: the second 4 bytes of the IP address to load
1630 * @src_addr_2: the third 4 bytes of the IP address to load
1631 * @src_addr_3: the fourth 4 bytes of the IP address to load
1633 s32 ixgbe_atr_set_src_ipv6_82599(union ixgbe_atr_input *input,
1634 __be32 src_addr_0, __be32 src_addr_1,
1635 __be32 src_addr_2, __be32 src_addr_3)
1637 DEBUGFUNC("ixgbe_atr_set_src_ipv6_82599");
1639 input->formatted.src_ip[0] = src_addr_0;
1640 input->formatted.src_ip[1] = src_addr_1;
1641 input->formatted.src_ip[2] = src_addr_2;
1642 input->formatted.src_ip[3] = src_addr_3;
1644 return IXGBE_SUCCESS;
1648 * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
1649 * @input: input stream to modify
1650 * @dst_addr_0: the first 4 bytes of the IP address to load
1651 * @dst_addr_1: the second 4 bytes of the IP address to load
1652 * @dst_addr_2: the third 4 bytes of the IP address to load
1653 * @dst_addr_3: the fourth 4 bytes of the IP address to load
1655 s32 ixgbe_atr_set_dst_ipv6_82599(union ixgbe_atr_input *input,
1656 __be32 dst_addr_0, __be32 dst_addr_1,
1657 __be32 dst_addr_2, __be32 dst_addr_3)
1659 DEBUGFUNC("ixgbe_atr_set_dst_ipv6_82599");
1661 input->formatted.dst_ip[0] = dst_addr_0;
1662 input->formatted.dst_ip[1] = dst_addr_1;
1663 input->formatted.dst_ip[2] = dst_addr_2;
1664 input->formatted.dst_ip[3] = dst_addr_3;
1666 return IXGBE_SUCCESS;
 * ixgbe_atr_set_src_port_82599 - Sets the source port
 * @input: input stream to modify
 * @src_port: the source port to load (big-endian)
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_set_src_port_82599(union ixgbe_atr_input *input, __be16 src_port)
	DEBUGFUNC("ixgbe_atr_set_src_port_82599");
	input->formatted.src_port = src_port;
	return IXGBE_SUCCESS;
 * ixgbe_atr_set_dst_port_82599 - Sets the destination port
 * @input: input stream to modify
 * @dst_port: the destination port to load (big-endian)
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_set_dst_port_82599(union ixgbe_atr_input *input, __be16 dst_port)
	DEBUGFUNC("ixgbe_atr_set_dst_port_82599");
	input->formatted.dst_port = dst_port;
	return IXGBE_SUCCESS;
 * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
 * @input: input stream to modify
 * @flex_bytes: the flexible bytes to load (big-endian)
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_set_flex_byte_82599(union ixgbe_atr_input *input, __be16 flex_bytes)
	DEBUGFUNC("ixgbe_atr_set_flex_byte_82599");
	input->formatted.flex_bytes = flex_bytes;
	return IXGBE_SUCCESS;
 * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
 * @input: input stream to modify
 * @vm_pool: the Virtual Machine pool to load
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_set_vm_pool_82599(union ixgbe_atr_input *input, u8 vm_pool)
	DEBUGFUNC("ixgbe_atr_set_vm_pool_82599");
	input->formatted.vm_pool = vm_pool;
	return IXGBE_SUCCESS;
 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
 * @input: input stream to modify
 * @l4type: the layer 4 type value to load
 * This call is deprecated and should be replaced with a direct access to
 * input->formatted.flow_type.
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_set_l4type_82599(union ixgbe_atr_input *input, u8 l4type)
	DEBUGFUNC("ixgbe_atr_set_l4type_82599");
	input->formatted.flow_type = l4type;
	return IXGBE_SUCCESS;
 * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
 * @input: input stream to search
 * @vlan: where the VLAN id is returned (big-endian)
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_get_vlan_id_82599(union ixgbe_atr_input *input, __be16 *vlan)
	DEBUGFUNC("ixgbe_atr_get_vlan_id_82599");
	*vlan = input->formatted.vlan_id;
	return IXGBE_SUCCESS;
 * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
 * @input: input stream to search
 * @src_addr: where the IP address is returned (big-endian)
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_get_src_ipv4_82599(union ixgbe_atr_input *input, __be32 *src_addr)
	DEBUGFUNC("ixgbe_atr_get_src_ipv4_82599");
	*src_addr = input->formatted.src_ip[0];
	return IXGBE_SUCCESS;
 * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
 * @input: input stream to search
 * @dst_addr: where the IP address is returned (big-endian)
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_get_dst_ipv4_82599(union ixgbe_atr_input *input, __be32 *dst_addr)
	DEBUGFUNC("ixgbe_atr_get_dst_ipv4_82599");
	*dst_addr = input->formatted.dst_ip[0];
	return IXGBE_SUCCESS;
1785 * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
1786 * @input: input stream to search
1787 * @src_addr_0: the first 4 bytes of the IP address to load
1788 * @src_addr_1: the second 4 bytes of the IP address to load
1789 * @src_addr_2: the third 4 bytes of the IP address to load
1790 * @src_addr_3: the fourth 4 bytes of the IP address to load
1792 s32 ixgbe_atr_get_src_ipv6_82599(union ixgbe_atr_input *input,
1793 __be32 *src_addr_0, __be32 *src_addr_1,
1794 __be32 *src_addr_2, __be32 *src_addr_3)
1796 DEBUGFUNC("ixgbe_atr_get_src_ipv6_82599");
1798 *src_addr_0 = input->formatted.src_ip[0];
1799 *src_addr_1 = input->formatted.src_ip[1];
1800 *src_addr_2 = input->formatted.src_ip[2];
1801 *src_addr_3 = input->formatted.src_ip[3];
1803 return IXGBE_SUCCESS;
1807 * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
1808 * @input: input stream to search
1809 * @dst_addr_0: the first 4 bytes of the IP address to load
1810 * @dst_addr_1: the second 4 bytes of the IP address to load
1811 * @dst_addr_2: the third 4 bytes of the IP address to load
1812 * @dst_addr_3: the fourth 4 bytes of the IP address to load
1814 s32 ixgbe_atr_get_dst_ipv6_82599(union ixgbe_atr_input *input,
1815 __be32 *dst_addr_0, __be32 *dst_addr_1,
1816 __be32 *dst_addr_2, __be32 *dst_addr_3)
1818 DEBUGFUNC("ixgbe_atr_get_dst_ipv6_82599");
1820 *dst_addr_0 = input->formatted.dst_ip[0];
1821 *dst_addr_1 = input->formatted.dst_ip[1];
1822 *dst_addr_2 = input->formatted.dst_ip[2];
1823 *dst_addr_3 = input->formatted.dst_ip[3];
1825 return IXGBE_SUCCESS;
 * ixgbe_atr_get_src_port_82599 - Gets the source port
 * @input: input stream to search
 * @src_port: where the source port is returned
 * The port is stored and returned in big-endian (wire) order; no byte swap
 * is performed here. Note that the FDIRPORT registers expect little-endian,
 * so callers programming hardware must swap separately.
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_get_src_port_82599(union ixgbe_atr_input *input, __be16 *src_port)
	DEBUGFUNC("ixgbe_atr_get_src_port_82599");
	*src_port = input->formatted.src_port;
	return IXGBE_SUCCESS;
 * ixgbe_atr_get_dst_port_82599 - Gets the destination port
 * @input: input stream to search
 * @dst_port: where the destination port is returned
 * The port is stored and returned in big-endian (wire) order; no byte swap
 * is performed here. Note that the FDIRPORT registers expect little-endian,
 * so callers programming hardware must swap separately.
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_get_dst_port_82599(union ixgbe_atr_input *input, __be16 *dst_port)
	DEBUGFUNC("ixgbe_atr_get_dst_port_82599");
	*dst_port = input->formatted.dst_port;
	return IXGBE_SUCCESS;
 * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
 * @input: input stream to search
 * @flex_bytes: where the flexible bytes are returned (big-endian)
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_get_flex_byte_82599(union ixgbe_atr_input *input, __be16 *flex_bytes)
	DEBUGFUNC("ixgbe_atr_get_flex_byte_82599");
	*flex_bytes = input->formatted.flex_bytes;
	return IXGBE_SUCCESS;
 * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
 * @input: input stream to search
 * @vm_pool: where the Virtual Machine pool is returned
 *
 * Return: always IXGBE_SUCCESS.
s32 ixgbe_atr_get_vm_pool_82599(union ixgbe_atr_input *input, u8 *vm_pool)
	DEBUGFUNC("ixgbe_atr_get_vm_pool_82599");
	*vm_pool = input->formatted.vm_pool;
	return IXGBE_SUCCESS;
1895 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
1896 * @input: input stream to modify
1897 * @l4type: the layer 4 type value to load
1899 * This call is deprecated and should be replaced with a direct access to
1900 * input->formatted.flow_type.
1902 s32 ixgbe_atr_get_l4type_82599(union ixgbe_atr_input *input, u8 *l4type)
1904 DEBUGFUNC("ixgbe_atr_get_l4type__82599");
1906 *l4type = input->formatted.flow_type;
1908 return IXGBE_SUCCESS;
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: input bitstream
 * @queue: queue index to direct traffic to
 *
 * Return: IXGBE_SUCCESS, or IXGBE_ERR_CONFIG for an unsupported flow type.
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                                          union ixgbe_atr_input *input,
	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
	 * Get the flow_type in order to program FDIRCMD properly
	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		DEBUGOUT(" Error on flow type input\n");
		return IXGBE_ERR_CONFIG;
	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
	          IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= ((u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
	fdirhashcmd = ((u64)fdircmd << 32) |
	              ixgbe_atr_compute_sig_hash_82599(input);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
	return IXGBE_SUCCESS;
1963 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1964 * @input_mask: mask to be bit swapped
1966 * The source and destination port masks for flow director are bit swapped
1967 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to
1968 * generate a correctly swapped value we need to bit swap the mask and that
1969 * is what is accomplished by this function.
1971 static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
1973 u32 mask = IXGBE_NTOHS(input_masks->dst_port_mask);
1974 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1975 mask |= IXGBE_NTOHS(input_masks->src_port_mask);
1976 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1977 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1978 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1979 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian. As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
/* byte-swap a 32-bit value; NOTE: evaluates _value four times, so callers
 * must not pass expressions with side effects */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
/* byte-swap a 16-bit value; evaluates _value twice (no side effects) */
#define IXGBE_STORE_AS_BE16(_value) \
	(((u16)(_value) >> 8) | ((u16)(_value) << 8))
 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
 * @hw: pointer to hardware structure
 * @input: input bitstream
 * @input_masks: masks for the input bitstream
 * @soft_id: software index for the filters
 * @queue: queue index to direct traffic to
 *
 * Note that the caller to this function must lock before calling, since the
 * hardware writes must be protected from one another.
 *
 * Return: IXGBE_SUCCESS, or IXGBE_ERR_CONFIG for an unsupported flow type
 * or an invalid VLAN/port/flex mask combination.
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
                                        union ixgbe_atr_input *input,
                                        struct ixgbe_atr_input_masks *input_masks,
                                        u16 soft_id, u8 queue)
	u32 fdirport, fdirtcpm;
	/* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
	u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
	            IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
	 * Check flow_type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
		fdirm |= IXGBE_FDIRM_L4P;
		/* fall through - raw IP shares the port-mask check below */
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		if (input_masks->dst_port_mask || input_masks->src_port_mask) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		DEBUGOUT(" Error on flow type input\n");
		return IXGBE_ERR_CONFIG;
	 * Program the relevant mask registers. If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field. Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well. L4type
	 * cannot be masked out in this implementation.
	 * This also assumes IPv4 only. IPv6 masking isn't supported at this
	switch (IXGBE_NTOHS(input_masks->vlan_id_mask) & 0xEFFF) {
		/* Unmask VLAN ID - bit 0 and fall through to unmask prio */
		fdirm &= ~IXGBE_FDIRM_VLANID;
		/* Unmask VLAN prio - bit 1 */
		fdirm &= ~IXGBE_FDIRM_VLANP;
		/* Unmask VLAN ID - bit 0 */
		fdirm &= ~IXGBE_FDIRM_VLANID;
		/* do nothing, vlans already masked */
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	if (input_masks->flex_mask & 0xFFFF) {
		if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
			DEBUGOUT(" Error on flexible byte mask\n");
			return IXGBE_ERR_CONFIG;
		/* Unmask Flex Bytes - bit 4 */
		fdirm &= ~IXGBE_FDIRM_FLEX;
	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
	                     ~input_masks->src_ip_mask[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
	                     ~input_masks->dst_ip_mask[0]);
	/* Apply masks to input data */
	input->formatted.vlan_id &= input_masks->vlan_id_mask;
	input->formatted.flex_bytes &= input_masks->flex_mask;
	input->formatted.src_port &= input_masks->src_port_mask;
	input->formatted.dst_port &= input_masks->dst_port_mask;
	input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
	input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
	/* record vlan (little-endian) and flex_bytes (big-endian) */
	           IXGBE_STORE_AS_BE16(IXGBE_NTOHS(input->formatted.flex_bytes));
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
	/* record source and destination port (little-endian)*/
	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
	/* configure FDIRHASH register */
	fdirhash = ixgbe_atr_compute_sig_hash_82599(input);
	/* we only want the bucket hash so drop the upper 16 bits */
	fdirhash &= IXGBE_ATR_HASH_MASK;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
	/* build the command word last; writing FDIRCMD kicks off the add */
	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
	fdircmd |= IXGBE_FDIRCMD_LAST;
	fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	return IXGBE_SUCCESS;
2151 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2152 * @hw: pointer to hardware structure
2153 * @reg: analog register to read
2156 * Performs read operation to Omer analog register specified.
2158 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2162 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2164 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2166 IXGBE_WRITE_FLUSH(hw);
2168 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2169 *val = (u8)core_ctl;
2171 return IXGBE_SUCCESS;
2175 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2176 * @hw: pointer to hardware structure
2177 * @reg: atlas register to write
2178 * @val: value to write
2180 * Performs write operation to Omer analog register specified.
2182 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2186 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2188 core_ctl = (reg << 8) | val;
2189 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2190 IXGBE_WRITE_FLUSH(hw);
2193 return IXGBE_SUCCESS;
2197 * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
2198 * @hw: pointer to hardware structure
2200 * Starts the hardware using the generic start_hw function
2201 * and the generation start_hw function.
2202 * Then performs revision-specific operations, if any.
2204 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
2206 s32 ret_val = IXGBE_SUCCESS;
2208 DEBUGFUNC("ixgbe_start_hw_rev_1__82599");
2210 ret_val = ixgbe_start_hw_generic(hw);
2211 if (ret_val != IXGBE_SUCCESS)
2214 ret_val = ixgbe_start_hw_gen2(hw);
2215 if (ret_val != IXGBE_SUCCESS)
2218 /* We need to run link autotry after the driver loads */
2219 hw->mac.autotry_restart = TRUE;
2221 if (ret_val == IXGBE_SUCCESS)
2222 ret_val = ixgbe_verify_fw_version_82599(hw);
/**
 * ixgbe_identify_phy_82599 - Get physical layer module
 * @hw: pointer to hardware structure
 *
 * Determines the physical layer module found on the current adapter.
 * If a PHY was already detected, the current PHY type in the hw struct
 * is maintained; otherwise the PHY detection routine is executed.
 *
 * NOTE(review): this extraction dropped lines (braces, the goto/else
 * pair after the copper-media check, and the trailing "out:" label per
 * the embedded original line numbers) -- consult the original file
 * before editing control flow.
 */
2235 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2237 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2239 DEBUGFUNC("ixgbe_identify_phy_82599");
2241 /* Detect PHY if not unknown - returns success if already detected. */
2242 status = ixgbe_identify_phy_generic(hw);
2243 if (status != IXGBE_SUCCESS) {
2244 /* 82599 10GBASE-T requires an external PHY */
2245 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
/* non-copper media: fall back to identifying an SFP module */
2248 status = ixgbe_identify_sfp_module_generic(hw);
2251 /* Set PHY type none if no PHY detected */
2252 if (hw->phy.type == ixgbe_phy_unknown) {
2253 hw->phy.type = ixgbe_phy_none;
2254 status = IXGBE_SUCCESS;
2257 /* Return error if SFP module has been detected but is not supported */
2258 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2259 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
/**
 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration:
 * first from an external copper PHY if one is present, then from the
 * AUTOC/AUTOC2 link-mode fields, and finally from the plugged SFP module.
 *
 * NOTE(review): this chunk is a lossy extraction -- blank lines, braces,
 * break/goto statements and some case labels were dropped (the original
 * file's line numbers are embedded at the start of each line). Compare
 * against the original file before changing control flow.
 */
2271 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2273 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
/* raw link-mode configuration registers */
2274 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2275 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2276 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2277 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2278 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2279 u16 ext_ability = 0;
2280 u8 comp_codes_10g = 0;
2281 u8 comp_codes_1g = 0;
/* NOTE(review): tag says "support" while the function is named
 * "supported" -- harmless, but inconsistent for log grepping. */
2283 DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
2285 hw->phy.ops.identify(hw);
/* external copper PHY present: query its PMA/PMD extended abilities */
2287 switch (hw->phy.type) {
2290 case ixgbe_phy_cu_unknown:
2291 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2292 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2293 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2294 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2295 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2296 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2297 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2298 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
/* no external PHY: derive the layer from the AUTOC link mode */
2304 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2305 case IXGBE_AUTOC_LMS_1G_AN:
2306 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2307 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2308 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2309 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2312 /* SFI mode so read SFP module */
2315 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2316 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2317 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2318 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2319 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2320 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2321 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2324 case IXGBE_AUTOC_LMS_10G_SERIAL:
2325 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2326 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2328 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
/* KX4/KX/KR backplane: capabilities are advertised as a bit set,
 * so OR each supported layer into the result */
2331 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2332 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2333 if (autoc & IXGBE_AUTOC_KX_SUPP)
2334 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2335 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2336 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2337 if (autoc & IXGBE_AUTOC_KR_SUPP)
2338 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
/* SFP check must be done last since DA modules are sometimes used to
 * test KR mode - we need to id KR mode correctly before SFP module.
 * Call identify_sfp because the pluggable module may have changed */
2350 hw->phy.ops.identify_sfp(hw);
2351 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
/* map the identified SFP module type to a physical layer */
2354 switch (hw->phy.type) {
2355 case ixgbe_phy_sfp_passive_tyco:
2356 case ixgbe_phy_sfp_passive_unknown:
2357 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2359 case ixgbe_phy_sfp_ftl_active:
2360 case ixgbe_phy_sfp_active_unknown:
2361 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2363 case ixgbe_phy_sfp_avago:
2364 case ixgbe_phy_sfp_ftl:
2365 case ixgbe_phy_sfp_intel:
2366 case ixgbe_phy_sfp_unknown:
/* optical module: read the SFF compliance-code bytes over I2C */
2367 hw->phy.ops.read_i2c_eeprom(hw,
2368 IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
2369 hw->phy.ops.read_i2c_eeprom(hw,
2370 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2371 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2372 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2373 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2374 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2375 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2376 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2383 return physical_layer;
/**
 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit for 82599.
 *
 * NOTE(review): local declarations (e.g. "secrxreg", "i") and the sleep
 * call inside the poll loop were dropped by this extraction (per the
 * embedded original line numbers); see the original file before editing.
 */
2393 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2395 #define IXGBE_MAX_SECRX_POLL 30
2399 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
/*
 * Workaround for 82599 silicon errata when enabling the Rx datapath.
 * If traffic is incoming before we enable the Rx unit, it could hang
 * the Rx DMA unit. Therefore, make sure the security engine is
 * completely disabled prior to enabling the Rx unit.
 */
2407 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2408 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2409 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
/* poll until the security block reports it is quiesced */
2410 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2411 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2412 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2415 /* Use interrupt-safe sleep just in case */
2419 /* For informational purposes only */
2420 if (i >= IXGBE_MAX_SECRX_POLL)
2421 DEBUGOUT("Rx unit being enabled before security "
2422 "path fully disabled. Continuing with init.\n");
/* enable Rx DMA, then re-enable the security engine */
2424 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2425 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2426 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2427 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2428 IXGBE_WRITE_FLUSH(hw);
2430 return IXGBE_SUCCESS;
2434 * ixgbe_get_device_caps_82599 - Get additional device capabilities
2435 * @hw: pointer to hardware structure
2436 * @device_caps: the EEPROM word with the extra device capabilities
2438 * This function will read the EEPROM location for the device capabilities,
2439 * and return the word through device_caps.
2441 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
2443 DEBUGFUNC("ixgbe_get_device_caps_82599");
2445 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
2447 return IXGBE_SUCCESS;
/**
 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
 * @hw: pointer to hardware structure
 *
 * Verifies that the installed firmware version is 0.6 or higher for SFI
 * devices. All 82599 SFI devices should have version 0.6 or higher.
 *
 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
 * if the FW version is not supported.
 *
 * NOTE(review): the "u16 fw_version" declaration, the final argument of
 * the last eeprom read, the "fw_version_out:" label and the closing
 * return were dropped by this extraction; consult the original file.
 */
2460 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2462 s32 status = IXGBE_ERR_EEPROM_VERSION;
2463 u16 fw_offset, fw_ptp_cfg_offset;
2466 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2468 /* firmware check is only necessary for SFI devices */
2469 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2470 status = IXGBE_SUCCESS;
2471 goto fw_version_out;
2474 /* get the offset to the Firmware Module block */
2475 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
/* 0 or all-ones pointer means no firmware module is present */
2477 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2478 goto fw_version_out;
2480 /* get the offset to the Pass Through Patch Configuration block */
2481 hw->eeprom.ops.read(hw, (fw_offset +
2482 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2483 &fw_ptp_cfg_offset);
2485 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2486 goto fw_version_out;
2488 /* get the firmware version */
2489 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2490 IXGBE_FW_PATCH_VERSION_4),
/* version 0.6+ (i.e. > 0x5) is required for SFI parts */
2493 if (fw_version > 0x5)
2494 status = IXGBE_SUCCESS;
2500 * ixgbe_enable_relaxed_ordering_82599 - Enable relaxed ordering
2501 * @hw: pointer to hardware structure
2504 void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw)
2509 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82599");
2511 /* Enable relaxed ordering */
2512 for (i = 0; i < hw->mac.max_tx_queues; i++) {
2513 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2514 regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2515 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
2518 for (i = 0; i < hw->mac.max_rx_queues; i++) {
2519 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
2520 regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
2521 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
2522 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);