1 /******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #include "ixgbe_type.h"
36 #include "ixgbe_82599.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
41 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
42 ixgbe_link_speed speed,
44 bool autoneg_wait_to_complete);
45 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
46 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
47 u16 offset, u16 *data);
48 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
49 u16 words, u16 *data);
/*
 * ixgbe_init_mac_link_ops_82599 - Init MAC link-related function pointers
 * @hw: pointer to hardware structure
 *
 * Installs the SFP+ laser-control ops when the media type is fiber, and
 * selects the setup_link handler: multispeed fiber, SmartSpeed (backplane
 * with SmartSpeed on/auto and no LESM firmware), or the plain 82599 one.
 *
 * NOTE(review): this extract is missing structural lines (braces, else
 * branches); the code lines below are kept byte-identical to the source.
 */
51 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
53 struct ixgbe_mac_info *mac = &hw->mac;
55 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
57 /* enable the laser control functions for SFP+ fiber */
58 if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
59 mac->ops.disable_tx_laser =
60 &ixgbe_disable_tx_laser_multispeed_fiber;
61 mac->ops.enable_tx_laser =
62 &ixgbe_enable_tx_laser_multispeed_fiber;
63 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
/* Non-fiber media: no Tx laser control (else branch of the media check) */
66 mac->ops.disable_tx_laser = NULL;
67 mac->ops.enable_tx_laser = NULL;
68 mac->ops.flap_tx_laser = NULL;
71 if (hw->phy.multispeed_fiber) {
72 /* Set up dual speed SFP+ support */
73 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
/* Backplane + SmartSpeed auto/on + no LESM firmware -> SmartSpeed path;
 * otherwise fall back to the plain 82599 MAC link setup below. */
75 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
76 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
77 hw->phy.smart_speed == ixgbe_smart_speed_on) &&
78 !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
79 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
81 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
87 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
88 * @hw: pointer to hardware structure
90 * Initialize any function pointers that were not able to be
91 * set during init_shared_code because the PHY/SFP type was
92 * not known. Perform the SFP init if necessary.
/* Returns IXGBE_SUCCESS, or the identify() error (e.g.
 * IXGBE_ERR_SFP_NOT_SUPPORTED) when the plugged module is unsupported. */
95 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
97 struct ixgbe_mac_info *mac = &hw->mac;
98 struct ixgbe_phy_info *phy = &hw->phy;
99 s32 ret_val = IXGBE_SUCCESS;
101 DEBUGFUNC("ixgbe_init_phy_ops_82599");
103 /* Identify the PHY or SFP module */
104 ret_val = phy->ops.identify(hw);
105 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
106 goto init_phy_ops_out;
108 /* Setup function pointers based on detected SFP module and speeds */
109 ixgbe_init_mac_link_ops_82599(hw);
/* A known SFP type means the PHY reset hook is not used */
110 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
111 hw->phy.ops.reset = NULL;
113 /* If copper media, overwrite with copper function pointers */
114 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
115 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
116 mac->ops.get_link_capabilities =
117 &ixgbe_get_copper_link_capabilities_generic;
120 /* Set necessary function pointers based on phy type */
121 switch (hw->phy.type) {
/* NOTE(review): the case label (presumably ixgbe_phy_tn) is missing
 * from this extract; the TNX-specific hooks are installed here. */
123 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
124 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
125 phy->ops.get_firmware_version =
126 &ixgbe_get_phy_firmware_version_tnx;
/*
 * ixgbe_setup_sfp_modules_82599 - Run the EEPROM SFP init sequence
 * @hw: pointer to hardware structure
 *
 * Plays back the EEPROM-stored init sequence into IXGBE_CORECTL while
 * holding the MAC_CSR software/firmware semaphore, restarts the DSP via
 * AUTOC (Restart_AN with LMS cleared), waits for the AN state machine to
 * leave state 0, then returns the link mode to 10G SFI.
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 * taken, or IXGBE_ERR_SFP_SETUP_NOT_COMPLETE if AN never left state 0.
 */
135 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
137 s32 ret_val = IXGBE_SUCCESS;
140 u16 list_offset, data_offset, data_value;
142 DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
144 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
145 ixgbe_init_mac_link_ops_82599(hw);
147 hw->phy.ops.reset = NULL;
149 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
151 if (ret_val != IXGBE_SUCCESS)
154 /* PHY config will finish before releasing the semaphore */
155 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
156 IXGBE_GSSR_MAC_CSR_SM);
157 if (ret_val != IXGBE_SUCCESS) {
158 ret_val = IXGBE_ERR_SWFW_SYNC;
/* Stream the init sequence words to CORECTL until the 0xffff
 * end-of-list sentinel is read from the EEPROM. */
162 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
163 while (data_value != 0xffff) {
164 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
165 IXGBE_WRITE_FLUSH(hw);
166 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
169 /* Release the semaphore */
170 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
171 /* Delay obtaining semaphore again to allow FW access */
172 msec_delay(hw->eeprom.semaphore_delay);
174 /* Now restart DSP by setting Restart_AN and clearing LMS */
175 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
176 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
177 IXGBE_AUTOC_AN_RESTART));
179 /* Wait for AN to leave state 0 */
180 for (i = 0; i < 10; i++) {
182 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
183 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
186 if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
187 DEBUGOUT("sfp module setup not complete\n");
188 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
192 /* Restart DSP by setting Restart_AN and return to SFI mode */
193 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
194 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
195 IXGBE_AUTOC_AN_RESTART));
203 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
204 * @hw: pointer to hardware structure
206 * Initialize the function pointers and assign the MAC type for 82599.
207 * Does not touch the hardware.
210 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
212 struct ixgbe_mac_info *mac = &hw->mac;
213 struct ixgbe_phy_info *phy = &hw->phy;
214 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
217 DEBUGFUNC("ixgbe_init_ops_82599");
/* NOTE(review): the result of ixgbe_init_phy_ops_generic() is
 * immediately overwritten by the next assignment — confirm intentional. */
219 ret_val = ixgbe_init_phy_ops_generic(hw);
220 ret_val = ixgbe_init_ops_generic(hw);
/* PHY identification / init hooks specific to 82599 */
223 phy->ops.identify = &ixgbe_identify_phy_82599;
224 phy->ops.init = &ixgbe_init_phy_ops_82599;
/* MAC-level hooks: reset/start, media queries, analog register access */
227 mac->ops.reset_hw = &ixgbe_reset_hw_82599;
228 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
229 mac->ops.get_media_type = &ixgbe_get_media_type_82599;
230 mac->ops.get_supported_physical_layer =
231 &ixgbe_get_supported_physical_layer_82599;
232 mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
233 mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
234 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
235 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
236 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
237 mac->ops.start_hw = &ixgbe_start_hw_82599;
238 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
239 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
240 mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
241 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
242 mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
244 /* RAR, Multicast, VLAN */
245 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
246 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
247 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
248 mac->rar_highwater = 1;
249 mac->ops.set_vfta = &ixgbe_set_vfta_generic;
250 mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
251 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
252 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
253 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
254 mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
255 mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
/* Link management hooks */
258 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
259 mac->ops.check_link = &ixgbe_check_mac_link_generic;
260 mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
261 ixgbe_init_mac_link_ops_82599(hw);
/* 82599 capacity constants: table sizes and queue counts */
263 mac->mcft_size = 128;
265 mac->num_rar_entries = 128;
266 mac->rx_pb_size = 512;
267 mac->max_tx_queues = 128;
268 mac->max_rx_queues = 128;
269 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/* ARC subsystem present iff FW mode bits are set in FWSM */
271 mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
272 IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
274 hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
/* EEPROM reads go through the 82599-specific (EERD-capable) paths */
277 eeprom->ops.read = &ixgbe_read_eeprom_82599;
278 eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
280 /* Manageability interface */
281 mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
288 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
289 * @hw: pointer to hardware structure
290 * @speed: pointer to link speed
291 * @negotiation: TRUE when autoneg or autotry is enabled
293 * Determines the link capabilities by reading the AUTOC register.
295 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
296 ixgbe_link_speed *speed,
299 s32 status = IXGBE_SUCCESS;
302 DEBUGFUNC("ixgbe_get_link_capabilities_82599");
305 /* Check if 1G SFP module. */
306 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
307 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
308 *speed = IXGBE_LINK_SPEED_1GB_FULL;
314 * Determine link capabilities based on the stored value of AUTOC,
315 * which represents EEPROM defaults. If AUTOC value has not
316 * been stored, use the current register values.
318 if (hw->mac.orig_link_settings_stored)
319 autoc = hw->mac.orig_autoc;
321 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
/* Decode the Link Mode Select field into a speed mask + autoneg flag */
323 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
324 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
325 *speed = IXGBE_LINK_SPEED_1GB_FULL;
326 *negotiation = FALSE;
329 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
330 *speed = IXGBE_LINK_SPEED_10GB_FULL;
331 *negotiation = FALSE;
334 case IXGBE_AUTOC_LMS_1G_AN:
335 *speed = IXGBE_LINK_SPEED_1GB_FULL;
339 case IXGBE_AUTOC_LMS_10G_SERIAL:
340 *speed = IXGBE_LINK_SPEED_10GB_FULL;
341 *negotiation = FALSE;
/* KX4/KX/KR modes: both KR and KX4 support contribute 10G; KX
 * contributes 1G. */
344 case IXGBE_AUTOC_LMS_KX4_KX_KR:
345 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
346 *speed = IXGBE_LINK_SPEED_UNKNOWN;
347 if (autoc & IXGBE_AUTOC_KR_SUPP)
348 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
349 if (autoc & IXGBE_AUTOC_KX4_SUPP)
350 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
351 if (autoc & IXGBE_AUTOC_KX_SUPP)
352 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
356 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
357 *speed = IXGBE_LINK_SPEED_100_FULL;
358 if (autoc & IXGBE_AUTOC_KR_SUPP)
359 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
360 if (autoc & IXGBE_AUTOC_KX4_SUPP)
361 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
362 if (autoc & IXGBE_AUTOC_KX_SUPP)
363 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
367 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
368 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
369 *negotiation = FALSE;
/* Unknown LMS value: report a link setup error (default case;
 * case label missing from this extract) */
373 status = IXGBE_ERR_LINK_SETUP;
/* Multispeed fiber modules can always try both 10G and 1G */
378 if (hw->phy.multispeed_fiber) {
379 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
380 IXGBE_LINK_SPEED_1GB_FULL;
389 * ixgbe_get_media_type_82599 - Get media type
390 * @hw: pointer to hardware structure
392 * Returns the media type (fiber, copper, backplane)
394 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
396 enum ixgbe_media_type media_type;
398 DEBUGFUNC("ixgbe_get_media_type_82599");
400 /* Detect if there is a copper PHY attached. */
401 switch (hw->phy.type) {
402 case ixgbe_phy_cu_unknown:
/* NOTE(review): additional copper PHY case label(s) appear to be
 * missing from this extract between these lines. */
404 media_type = ixgbe_media_type_copper;
/* Otherwise classify by PCI device ID */
410 switch (hw->device_id) {
411 case IXGBE_DEV_ID_82599_KX4:
412 case IXGBE_DEV_ID_82599_KX4_MEZZ:
413 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
414 case IXGBE_DEV_ID_82599_KR:
415 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
416 case IXGBE_DEV_ID_82599_XAUI_LOM:
417 /* Default device ID is mezzanine card KX/KX4 */
418 media_type = ixgbe_media_type_backplane;
420 case IXGBE_DEV_ID_82599_SFP:
421 case IXGBE_DEV_ID_82599_SFP_FCOE:
422 case IXGBE_DEV_ID_82599_SFP_EM:
423 case IXGBE_DEV_ID_82599EN_SFP:
424 media_type = ixgbe_media_type_fiber;
426 case IXGBE_DEV_ID_82599_CX4:
427 media_type = ixgbe_media_type_cx4;
429 case IXGBE_DEV_ID_82599_T3_LOM:
430 media_type = ixgbe_media_type_copper;
/* Unrecognized device ID (default case) */
433 media_type = ixgbe_media_type_unknown;
441 * ixgbe_start_mac_link_82599 - Setup MAC link settings
442 * @hw: pointer to hardware structure
443 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
445 * Configures link settings based on values in the ixgbe_hw struct.
446 * Restarts the link. Performs autonegotiation if needed.
448 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
449 bool autoneg_wait_to_complete)
454 s32 status = IXGBE_SUCCESS;
456 DEBUGFUNC("ixgbe_start_mac_link_82599");
/* Kick autonegotiation by setting Restart_AN in AUTOC */
460 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
461 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
462 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
464 /* Only poll for autoneg to complete if specified to do so */
465 if (autoneg_wait_to_complete) {
/* Only the KX4/KX/KR link modes use the KX_AN_COMP indication */
466 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
467 IXGBE_AUTOC_LMS_KX4_KX_KR ||
468 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
469 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
470 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
471 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
472 links_reg = 0; /* Just in case Autoneg time = 0 */
473 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
474 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
475 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
/* Timed out without KX_AN_COMP -> report incomplete autoneg */
479 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
480 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
481 DEBUGOUT("Autoneg did not complete.\n");
486 /* Add delay to filter out noises during initial link setup */
493 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
494 * @hw: pointer to hardware structure
496 * The base drivers may require better control over SFP+ module
497 * PHY states. This includes selectively shutting down the Tx
498 * laser on the PHY, effectively halting physical link.
500 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
502 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
504 /* Disable tx laser; allow 100us to go dark per spec */
/* Setting the SDP3 pin high turns the laser off */
505 esdp_reg |= IXGBE_ESDP_SDP3;
506 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
507 IXGBE_WRITE_FLUSH(hw);
512 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
513 * @hw: pointer to hardware structure
515 * The base drivers may require better control over SFP+ module
516 * PHY states. This includes selectively turning on the Tx
517 * laser on the PHY, effectively starting physical link.
519 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
521 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
523 /* Enable tx laser; allow 100ms to light up */
/* Clearing the SDP3 pin turns the laser back on */
524 esdp_reg &= ~IXGBE_ESDP_SDP3;
525 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
526 IXGBE_WRITE_FLUSH(hw);
531 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
532 * @hw: pointer to hardware structure
534 * When the driver changes the link speeds that it can support,
535 * it sets autotry_restart to TRUE to indicate that we need to
536 * initiate a new autotry session with the link partner. To do
537 * so, we set the speed then disable and re-enable the tx laser, to
538 * alert the link partner that it also needs to restart autotry on its
539 * end. This is consistent with TRUE clause 37 autoneg, which also
540 * involves a loss of signal.
542 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
544 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
/* Only flap once per requested autotry restart */
546 if (hw->mac.autotry_restart) {
547 ixgbe_disable_tx_laser_multispeed_fiber(hw);
548 ixgbe_enable_tx_laser_multispeed_fiber(hw);
549 hw->mac.autotry_restart = FALSE;
554 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
555 * @hw: pointer to hardware structure
556 * @speed: new link speed
557 * @autoneg: TRUE if autonegotiation enabled
558 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
560 * Set the link speed in the AUTOC register and restarts link.
/* Tries each requested speed in software, highest first (10G then 1G),
 * steering the SFP+ module rate via the ESDP SDP5 pin and flapping the
 * Tx laser to restart autotry with the link partner. If no speed links,
 * recurses once with only the highest speed that was attempted. */
562 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
563 ixgbe_link_speed speed, bool autoneg,
564 bool autoneg_wait_to_complete)
566 s32 status = IXGBE_SUCCESS;
567 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
568 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
570 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
572 bool link_up = FALSE;
575 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
577 /* Mask off requested but non-supported speeds */
578 status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
579 if (status != IXGBE_SUCCESS)
585 * Try each speed one by one, highest priority first. We do this in
586 * software because 10gb fiber doesn't support speed autonegotiation.
588 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
590 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
592 /* If we already have link at this speed, just jump out */
593 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
594 if (status != IXGBE_SUCCESS)
597 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
600 /* Set the module link speed */
/* SDP5 high selects the 10G module rate */
601 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
602 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
603 IXGBE_WRITE_FLUSH(hw);
605 /* Allow module to change analog characteristics (1G->10G) */
608 status = ixgbe_setup_mac_link_82599(hw,
609 IXGBE_LINK_SPEED_10GB_FULL,
611 autoneg_wait_to_complete);
612 if (status != IXGBE_SUCCESS)
615 /* Flap the tx laser if it has not already been done */
616 ixgbe_flap_tx_laser(hw);
619 * Wait for the controller to acquire link. Per IEEE 802.3ap,
620 * Section 73.10.2, we may have to wait up to 500ms if KR is
621 * attempted. 82599 uses the same timing for 10g SFI.
623 for (i = 0; i < 5; i++) {
624 /* Wait for the link partner to also set speed */
627 /* If we have link, just jump out */
628 status = ixgbe_check_link(hw, &link_speed,
630 if (status != IXGBE_SUCCESS)
/* Next: attempt 1G if requested */
638 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
640 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
641 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
643 /* If we already have link at this speed, just jump out */
644 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
645 if (status != IXGBE_SUCCESS)
648 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
651 /* Set the module link speed */
/* SDP5 low selects the 1G module rate */
652 esdp_reg &= ~IXGBE_ESDP_SDP5;
653 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
654 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
655 IXGBE_WRITE_FLUSH(hw);
657 /* Allow module to change analog characteristics (10G->1G) */
660 status = ixgbe_setup_mac_link_82599(hw,
661 IXGBE_LINK_SPEED_1GB_FULL,
663 autoneg_wait_to_complete);
664 if (status != IXGBE_SUCCESS)
667 /* Flap the tx laser if it has not already been done */
668 ixgbe_flap_tx_laser(hw);
670 /* Wait for the link partner to also set speed */
673 /* If we have link, just jump out */
674 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
675 if (status != IXGBE_SUCCESS)
683 * We didn't get link. Configure back to the highest speed we tried,
684 * (if there was more than one). We call ourselves back with just the
685 * single highest speed that the user requested.
688 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
689 highest_link_speed, autoneg, autoneg_wait_to_complete);
692 /* Set autoneg_advertised value based on input link speed */
693 hw->phy.autoneg_advertised = 0;
695 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
696 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
698 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
699 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
705 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
706 * @hw: pointer to hardware structure
707 * @speed: new link speed
708 * @autoneg: TRUE if autonegotiation enabled
709 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
711 * Implements the Intel SmartSpeed algorithm.
/* First tries the full advertisement; if link fails and both KR and
 * KX4/KX were advertised, disables KR (SmartSpeed active) and retries;
 * finally restores full advertisement if link still fails. */
713 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
714 ixgbe_link_speed speed, bool autoneg,
715 bool autoneg_wait_to_complete)
717 s32 status = IXGBE_SUCCESS;
718 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
720 bool link_up = FALSE;
721 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
723 DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
725 /* Set autoneg_advertised value based on input link speed */
726 hw->phy.autoneg_advertised = 0;
728 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
729 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
731 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
732 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
734 if (speed & IXGBE_LINK_SPEED_100_FULL)
735 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
738 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
739 * autoneg advertisement if link is unable to be established at the
740 * highest negotiated rate. This can sometimes happen due to integrity
741 * issues with the physical media connection.
744 /* First, try to get link with full advertisement */
745 hw->phy.smart_speed_active = FALSE;
746 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
747 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
748 autoneg_wait_to_complete);
749 if (status != IXGBE_SUCCESS)
753 * Wait for the controller to acquire link. Per IEEE 802.3ap,
754 * Section 73.10.2, we may have to wait up to 500ms if KR is
755 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
756 * Table 9 in the AN MAS.
758 for (i = 0; i < 5; i++) {
761 /* If we have link, just jump out */
762 status = ixgbe_check_link(hw, &link_speed, &link_up,
764 if (status != IXGBE_SUCCESS)
773 * We didn't get link. If we advertised KR plus one of KX4/KX
774 * (or BX4/BX), then disable KR and try again.
/* Can't apply SmartSpeed unless both KR and KX4/KX were advertised */
776 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
777 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
780 /* Turn SmartSpeed on to disable KR support */
781 hw->phy.smart_speed_active = TRUE;
782 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
783 autoneg_wait_to_complete);
784 if (status != IXGBE_SUCCESS)
788 * Wait for the controller to acquire link. 600ms will allow for
789 * the AN link_fail_inhibit_timer as well for multiple cycles of
790 * parallel detect, both 10g and 1g. This allows for the maximum
791 * connect attempts as defined in the AN MAS table 73-7.
793 for (i = 0; i < 6; i++) {
796 /* If we have link, just jump out */
797 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
798 if (status != IXGBE_SUCCESS)
805 /* We didn't get link. Turn SmartSpeed back off. */
806 hw->phy.smart_speed_active = FALSE;
807 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
808 autoneg_wait_to_complete);
/* Report when SmartSpeed has settled below the maximum advertised rate */
811 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
812 DEBUGOUT("Smartspeed has downgraded the link speed "
813 "from the maximum advertised\n");
818 * ixgbe_setup_mac_link_82599 - Set MAC link speed
819 * @hw: pointer to hardware structure
820 * @speed: new link speed
821 * @autoneg: TRUE if autonegotiation enabled
822 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
824 * Set the link speed in the AUTOC register and restarts link.
/* Returns IXGBE_ERR_LINK_SETUP if the requested speed has no overlap
 * with the device's link capabilities, or
 * IXGBE_ERR_AUTONEG_NOT_COMPLETE if waiting for KX/KX4/KR AN times out. */
826 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
827 ixgbe_link_speed speed, bool autoneg,
828 bool autoneg_wait_to_complete)
830 s32 status = IXGBE_SUCCESS;
831 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
832 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
833 u32 start_autoc = autoc;
835 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
836 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
837 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
840 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
842 DEBUGFUNC("ixgbe_setup_mac_link_82599");
844 /* Check to see if speed passed in is supported. */
845 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
846 if (status != IXGBE_SUCCESS)
849 speed &= link_capabilities;
851 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
852 status = IXGBE_ERR_LINK_SETUP;
856 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
857 if (hw->mac.orig_link_settings_stored)
858 orig_autoc = hw->mac.orig_autoc;
862 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
863 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
864 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
865 /* Set KX4/KX/KR support according to speed requested */
866 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
867 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
868 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
869 autoc |= IXGBE_AUTOC_KX4_SUPP;
/* KR is only re-enabled when SmartSpeed is not active */
870 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
871 (hw->phy.smart_speed_active == FALSE))
872 autoc |= IXGBE_AUTOC_KR_SUPP;
873 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
874 autoc |= IXGBE_AUTOC_KX_SUPP;
875 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
876 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
877 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
878 /* Switch from 1G SFI to 10G SFI if requested */
879 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
880 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
881 autoc &= ~IXGBE_AUTOC_LMS_MASK;
882 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
884 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
885 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
886 /* Switch from 10G SFI to 1G SFI if requested */
887 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
888 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
889 autoc &= ~IXGBE_AUTOC_LMS_MASK;
/* 1G mode selection depends on the autoneg setting (the
 * conditional line is missing from this extract) */
891 autoc |= IXGBE_AUTOC_LMS_1G_AN;
893 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
/* Only touch the hardware when AUTOC actually changed */
897 if (autoc != start_autoc) {
899 autoc |= IXGBE_AUTOC_AN_RESTART;
900 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
902 /* Only poll for autoneg to complete if specified to do so */
903 if (autoneg_wait_to_complete) {
904 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
905 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
906 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
907 links_reg = 0; /*Just in case Autoneg time=0*/
908 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
910 IXGBE_READ_REG(hw, IXGBE_LINKS);
911 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
915 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
917 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
918 DEBUGOUT("Autoneg did not complete.\n");
923 /* Add delay to filter out noises during initial link setup */
932 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
933 * @hw: pointer to hardware structure
934 * @speed: new link speed
935 * @autoneg: TRUE if autonegotiation enabled
936 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
938 * Restarts link on PHY and MAC based on settings passed in.
940 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
941 ixgbe_link_speed speed,
943 bool autoneg_wait_to_complete)
947 DEBUGFUNC("ixgbe_setup_copper_link_82599");
949 /* Setup the PHY according to input speed */
950 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
951 autoneg_wait_to_complete);
/* Then restart the MAC side of the link; no check of the PHY status is
 * visible before this call in this extract — TODO confirm against the
 * complete source. */
953 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
959 * ixgbe_reset_hw_82599 - Perform hardware reset
960 * @hw: pointer to hardware structure
962 * Resets the hardware by resetting the transmit and receive units, masks
963 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
/* Returns IXGBE_SUCCESS, the stop_adapter/phy-init error, or
 * IXGBE_ERR_RESET_FAILED if the reset bit never self-clears. */
966 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
968 ixgbe_link_speed link_speed;
970 u32 ctrl, i, autoc, autoc2;
971 bool link_up = FALSE;
973 DEBUGFUNC("ixgbe_reset_hw_82599");
975 /* Call adapter stop to disable tx/rx and clear interrupts */
976 status = hw->mac.ops.stop_adapter(hw);
977 if (status != IXGBE_SUCCESS)
980 /* flush pending Tx transactions */
981 ixgbe_clear_tx_pending(hw);
983 /* PHY ops must be identified and initialized prior to reset */
985 /* Identify PHY and related function pointers */
986 status = hw->phy.ops.init(hw);
988 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
991 /* Setup SFP module if there is one present. */
992 if (hw->phy.sfp_setup_needed) {
993 status = hw->mac.ops.setup_sfp(hw);
994 hw->phy.sfp_setup_needed = FALSE;
997 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
/* Reset PHY unless disabled or no reset hook is installed */
1001 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
1002 hw->phy.ops.reset(hw);
1006 * Issue global reset to the MAC. Needs to be SW reset if link is up.
1007 * If link reset is used when link is up, it might reset the PHY when
1008 * mng is using it. If link is down or the flag to force full link
1009 * reset is set, then perform link reset.
1011 ctrl = IXGBE_CTRL_LNK_RST;
1012 if (!hw->force_full_reset) {
1013 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
/* Downgrade to a SW reset when link is up (condition line missing
 * from this extract) */
1015 ctrl = IXGBE_CTRL_RST;
1018 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1019 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1020 IXGBE_WRITE_FLUSH(hw);
1022 /* Poll for reset bit to self-clear indicating reset is complete */
1023 for (i = 0; i < 10; i++) {
1025 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1026 if (!(ctrl & IXGBE_CTRL_RST_MASK))
1030 if (ctrl & IXGBE_CTRL_RST_MASK) {
1031 status = IXGBE_ERR_RESET_FAILED;
1032 DEBUGOUT("Reset polling failed to complete.\n");
1038 * Double resets are required for recovery from certain error
1039 * conditions. Between resets, it is necessary to stall to allow time
1040 * for any pending HW events to complete.
1042 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1043 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1048 * Store the original AUTOC/AUTOC2 values if they have not been
1049 * stored off yet. Otherwise restore the stored original
1050 * values since the reset operation sets back to defaults.
1052 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1053 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1054 if (hw->mac.orig_link_settings_stored == FALSE) {
1055 hw->mac.orig_autoc = autoc;
1056 hw->mac.orig_autoc2 = autoc2;
1057 hw->mac.orig_link_settings_stored = TRUE;
/* Else: re-apply stored AUTOC/AUTOC2 since reset restored defaults */
1059 if (autoc != hw->mac.orig_autoc)
1060 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
1061 IXGBE_AUTOC_AN_RESTART));
1063 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1064 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1065 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1066 autoc2 |= (hw->mac.orig_autoc2 &
1067 IXGBE_AUTOC2_UPPER_MASK);
1068 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1072 /* Store the permanent mac address */
1073 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1076 * Store MAC address from RAR0, clear receive address registers, and
1077 * clear the multicast table. Also reset num_rar_entries to 128,
1078 * since we modify this value when programming the SAN MAC address.
1080 hw->mac.num_rar_entries = 128;
1081 hw->mac.ops.init_rx_addrs(hw);
1083 /* Store the permanent SAN mac address */
1084 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1086 /* Add the SAN MAC address to the RAR only if it's a valid address */
1087 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1088 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1089 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1091 /* Reserve the last RAR for the SAN MAC address */
1092 hw->mac.num_rar_entries--;
1095 /* Store the alternative WWNN/WWPN prefix */
1096 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1097 &hw->mac.wwpn_prefix);
1104 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1105 * @hw: pointer to hardware structure
/* Waits for any in-flight FDIRCMD command to finish, clears the hash
 * table (with the documented CLEARHT errata toggle), rewrites FDIRCTRL
 * with INIT_DONE cleared, and polls for INIT_DONE to come back.
 * Returns IXGBE_SUCCESS or IXGBE_ERR_FDIR_REINIT_FAILED on either poll
 * timing out. */
1107 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1110 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1111 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1113 DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1116 * Before starting reinitialization process,
1117 * FDIRCMD.CMD must be zero.
1119 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1120 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1121 IXGBE_FDIRCMD_CMD_MASK))
1125 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1126 DEBUGOUT("Flow Director previous command isn't complete, "
1127 "aborting table re-initialization.\n");
1128 return IXGBE_ERR_FDIR_REINIT_FAILED;
1131 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1132 IXGBE_WRITE_FLUSH(hw);
1134 * 82599 adapters flow director init flow cannot be restarted,
1135 * Workaround 82599 silicon errata by performing the following steps
1136 * before re-writing the FDIRCTRL control register with the same value.
1137 * - write 1 to bit 8 of FDIRCMD register &
1138 * - write 0 to bit 8 of FDIRCMD register
1140 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1141 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1142 IXGBE_FDIRCMD_CLEARHT));
1143 IXGBE_WRITE_FLUSH(hw);
1144 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1145 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1146 ~IXGBE_FDIRCMD_CLEARHT));
1147 IXGBE_WRITE_FLUSH(hw);
1149 * Clear FDIR Hash register to clear any leftover hashes
1150 * waiting to be programmed.
1152 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1153 IXGBE_WRITE_FLUSH(hw);
1155 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1156 IXGBE_WRITE_FLUSH(hw);
1158 /* Poll init-done after we write FDIRCTRL register */
1159 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1160 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1161 IXGBE_FDIRCTRL_INIT_DONE)
1165 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1166 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1167 return IXGBE_ERR_FDIR_REINIT_FAILED;
1170 /* Clear FDIR statistics registers (read to clear) */
1171 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1172 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1173 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1174 IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1175 IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1177 return IXGBE_SUCCESS;
1181 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1182 * @hw: pointer to hardware structure
1183 * @fdirctrl: value to write to flow director control register
1185 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1189 DEBUGFUNC("ixgbe_fdir_enable_82599");
1191 /* Prime the keys for hashing */
1192 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1193 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1196 * Poll init-done after we write the register. Estimated times:
1197 * 10G: PBALLOC = 11b, timing is 60us
1198 * 1G: PBALLOC = 11b, timing is 600us
1199 * 100M: PBALLOC = 11b, timing is 6ms
1201 * Multiple these timings by 4 if under full Rx load
1203 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1204 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1205 * this might not finish in our poll time, but we can live with that
1208 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1209 IXGBE_WRITE_FLUSH(hw);
1210 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1211 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1212 IXGBE_FDIRCTRL_INIT_DONE)
1217 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1218 DEBUGOUT("Flow Director poll time exceeded!\n");
1222 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1223 * @hw: pointer to hardware structure
1224 * @fdirctrl: value to write to flow director control register, initially
1225 * contains just the value of the Rx packet buffer allocation
1227 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1229 DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1232 * Continue setup of fdirctrl register bits:
1233 * Move the flexible bytes to use the ethertype - shift 6 words
1234 * Set the maximum length per hash bucket to 0xA filters
1235 * Send interrupt when 64 filters are left
1237 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1238 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1239 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1241 /* write hashes and fdirctrl register, poll for completion */
1242 ixgbe_fdir_enable_82599(hw, fdirctrl);
1244 return IXGBE_SUCCESS;
1248 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1249 * @hw: pointer to hardware structure
1250 * @fdirctrl: value to write to flow director control register, initially
1251 * contains just the value of the Rx packet buffer allocation
1253 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1255 DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1258 * Continue setup of fdirctrl register bits:
1259 * Turn perfect match filtering on
1260 * Report hash in RSS field of Rx wb descriptor
1261 * Initialize the drop queue
1262 * Move the flexible bytes to use the ethertype - shift 6 words
1263 * Set the maximum length per hash bucket to 0xA filters
1264 * Send interrupt when 64 (0x4 * 16) filters are left
1266 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1267 IXGBE_FDIRCTRL_REPORT_STATUS |
1268 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1269 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1270 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1271 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1273 /* write hashes and fdirctrl register, poll for completion */
1274 ixgbe_fdir_enable_82599(hw, fdirctrl);
1276 return IXGBE_SUCCESS;
/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
/*
 * One round of the signature/bucket/common hash for key bit _n (low dword)
 * and key bit _n + 16 (high dword).  Wrapped in do { } while (0) so the
 * macro expands to exactly one statement (safe after an unbraced if), and
 * the argument is bound to a local 'n' so it is evaluated only once.
 */
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
1304 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1305 * @stream: input bitstream to compute the hash on
1307 * This function is almost identical to the function above but contains
1308 * several optomizations such as unwinding all of the loops, letting the
1309 * compiler work out all of the conditional ifs since the keys are static
1310 * defines, and computing two keys at once since the hashed dword stream
1311 * will be the same for both keys.
1313 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1314 union ixgbe_atr_hash_dword common)
1316 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1317 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1319 /* record the flow_vm_vlan bits as they are a key part to the hash */
1320 flow_vm_vlan = IXGBE_NTOHL(input.dword);
1322 /* generate common hash dword */
1323 hi_hash_dword = IXGBE_NTOHL(common.dword);
1325 /* low dword is word swapped version of common */
1326 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1328 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1329 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1331 /* Process bits 0 and 16 */
1332 IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1335 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1336 * delay this because bit 0 of the stream should not be processed
1337 * so we do not add the vlan until after bit 0 was processed
1339 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1341 /* Process remaining 30 bit of the key */
1342 IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1343 IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1344 IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1345 IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1346 IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1347 IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1348 IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1349 IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1350 IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1351 IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1352 IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1353 IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1354 IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1355 IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1356 IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1358 /* combine common_hash result with signature and bucket hashes */
1359 bucket_hash ^= common_hash;
1360 bucket_hash &= IXGBE_ATR_HASH_MASK;
1362 sig_hash ^= common_hash << 16;
1363 sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1365 /* return completed signature hash */
1366 return sig_hash ^ bucket_hash;
1370 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1371 * @hw: pointer to hardware structure
1372 * @input: unique input dword
1373 * @common: compressed common input dword
1374 * @queue: queue index to direct traffic to
1376 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1377 union ixgbe_atr_hash_dword input,
1378 union ixgbe_atr_hash_dword common,
1384 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1387 * Get the flow_type in order to program FDIRCMD properly
1388 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1390 switch (input.formatted.flow_type) {
1391 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1392 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1393 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1394 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1395 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1396 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1399 DEBUGOUT(" Error on flow type input\n");
1400 return IXGBE_ERR_CONFIG;
1403 /* configure FDIRCMD register */
1404 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1405 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1406 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1407 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1410 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1411 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1413 fdirhashcmd = (u64)fdircmd << 32;
1414 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1415 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1417 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1419 return IXGBE_SUCCESS;
/*
 * One round of the bucket hash for key bit _n (low dword) and key bit
 * _n + 16 (high dword).  Wrapped in do { } while (0) so the macro expands
 * to exactly one statement, and the argument is bound to a local 'n' so it
 * is evaluated only once.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
1432 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1433 * @atr_input: input bitstream to compute the hash on
1434 * @input_mask: mask for the input bitstream
1436 * This function serves two main purposes. First it applys the input_mask
1437 * to the atr_input resulting in a cleaned up atr_input data stream.
1438 * Secondly it computes the hash and stores it in the bkt_hash field at
1439 * the end of the input byte stream. This way it will be available for
1440 * future use without needing to recompute the hash.
1442 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1443 union ixgbe_atr_input *input_mask)
1446 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1447 u32 bucket_hash = 0;
1449 /* Apply masks to input data */
1450 input->dword_stream[0] &= input_mask->dword_stream[0];
1451 input->dword_stream[1] &= input_mask->dword_stream[1];
1452 input->dword_stream[2] &= input_mask->dword_stream[2];
1453 input->dword_stream[3] &= input_mask->dword_stream[3];
1454 input->dword_stream[4] &= input_mask->dword_stream[4];
1455 input->dword_stream[5] &= input_mask->dword_stream[5];
1456 input->dword_stream[6] &= input_mask->dword_stream[6];
1457 input->dword_stream[7] &= input_mask->dword_stream[7];
1458 input->dword_stream[8] &= input_mask->dword_stream[8];
1459 input->dword_stream[9] &= input_mask->dword_stream[9];
1460 input->dword_stream[10] &= input_mask->dword_stream[10];
1462 /* record the flow_vm_vlan bits as they are a key part to the hash */
1463 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1465 /* generate common hash dword */
1466 hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
1467 input->dword_stream[2] ^
1468 input->dword_stream[3] ^
1469 input->dword_stream[4] ^
1470 input->dword_stream[5] ^
1471 input->dword_stream[6] ^
1472 input->dword_stream[7] ^
1473 input->dword_stream[8] ^
1474 input->dword_stream[9] ^
1475 input->dword_stream[10]);
1477 /* low dword is word swapped version of common */
1478 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1480 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1481 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1483 /* Process bits 0 and 16 */
1484 IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1487 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1488 * delay this because bit 0 of the stream should not be processed
1489 * so we do not add the vlan until after bit 0 was processed
1491 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1493 /* Process remaining 30 bit of the key */
1494 IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
1495 IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
1496 IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1497 IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1498 IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1499 IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1500 IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1501 IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1502 IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1503 IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1504 IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1505 IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1506 IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1507 IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1508 IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1511 * Limit hash to 13 bits since max bucket count is 8K.
1512 * Store result at the end of the input stream.
1514 input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1518 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1519 * @input_mask: mask to be bit swapped
1521 * The source and destination port masks for flow director are bit swapped
1522 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to
1523 * generate a correctly swapped value we need to bit swap the mask and that
1524 * is what is accomplished by this function.
1526 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1528 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1529 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1530 mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1531 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1532 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1533 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1534 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
/* Byte-reverse a 32-bit value (swaps all four bytes). */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

/* Write a register the hardware interprets as big-endian: convert the
 * network-order value to host order, then store it byte-reversed. */
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

/* Byte-swap a 16-bit value, then convert to host order. */
#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1554 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1555 union ixgbe_atr_input *input_mask)
1557 /* mask IPv6 since it is currently not supported */
1558 u32 fdirm = IXGBE_FDIRM_DIPv6;
1561 DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
1564 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1565 * are zero, then assume a full mask for that field. Also assume that
1566 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1567 * cannot be masked out in this implementation.
1569 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1573 /* verify bucket hash is cleared on hash generation */
1574 if (input_mask->formatted.bkt_hash)
1575 DEBUGOUT(" bucket hash should always be 0 in mask\n");
1577 /* Program FDIRM and verify partial masks */
1578 switch (input_mask->formatted.vm_pool & 0x7F) {
1580 fdirm |= IXGBE_FDIRM_POOL;
1584 DEBUGOUT(" Error on vm pool mask\n");
1585 return IXGBE_ERR_CONFIG;
1588 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1590 fdirm |= IXGBE_FDIRM_L4P;
1591 if (input_mask->formatted.dst_port ||
1592 input_mask->formatted.src_port) {
1593 DEBUGOUT(" Error on src/dst port mask\n");
1594 return IXGBE_ERR_CONFIG;
1596 case IXGBE_ATR_L4TYPE_MASK:
1599 DEBUGOUT(" Error on flow type mask\n");
1600 return IXGBE_ERR_CONFIG;
1603 switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1605 /* mask VLAN ID, fall through to mask VLAN priority */
1606 fdirm |= IXGBE_FDIRM_VLANID;
1608 /* mask VLAN priority */
1609 fdirm |= IXGBE_FDIRM_VLANP;
1612 /* mask VLAN ID only, fall through */
1613 fdirm |= IXGBE_FDIRM_VLANID;
1615 /* no VLAN fields masked */
1618 DEBUGOUT(" Error on VLAN mask\n");
1619 return IXGBE_ERR_CONFIG;
1622 switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1624 /* Mask Flex Bytes, fall through */
1625 fdirm |= IXGBE_FDIRM_FLEX;
1629 DEBUGOUT(" Error on flexible byte mask\n");
1630 return IXGBE_ERR_CONFIG;
1633 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1634 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1636 /* store the TCP/UDP port masks, bit reversed from port layout */
1637 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1639 /* write both the same so that UDP and TCP use the same mask */
1640 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1641 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1643 /* store source and destination IP masks (big-enian) */
1644 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1645 ~input_mask->formatted.src_ip[0]);
1646 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1647 ~input_mask->formatted.dst_ip[0]);
1649 return IXGBE_SUCCESS;
1652 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1653 union ixgbe_atr_input *input,
1654 u16 soft_id, u8 queue)
1656 u32 fdirport, fdirvlan, fdirhash, fdircmd;
1658 DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1660 /* currently IPv6 is not supported, must be programmed with 0 */
1661 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1662 input->formatted.src_ip[0]);
1663 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1664 input->formatted.src_ip[1]);
1665 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1666 input->formatted.src_ip[2]);
1668 /* record the source address (big-endian) */
1669 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1671 /* record the first 32 bits of the destination address (big-endian) */
1672 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1674 /* record source and destination port (little-endian)*/
1675 fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1676 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1677 fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1678 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1680 /* record vlan (little-endian) and flex_bytes(big-endian) */
1681 fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1682 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1683 fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1684 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1686 /* configure FDIRHASH register */
1687 fdirhash = input->formatted.bkt_hash;
1688 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1689 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1692 * flush all previous writes to make certain registers are
1693 * programmed prior to issuing the command
1695 IXGBE_WRITE_FLUSH(hw);
1697 /* configure FDIRCMD register */
1698 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1699 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1700 if (queue == IXGBE_FDIR_DROP_QUEUE)
1701 fdircmd |= IXGBE_FDIRCMD_DROP;
1702 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1703 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1704 fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1706 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1708 return IXGBE_SUCCESS;
1711 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1712 union ixgbe_atr_input *input,
1718 s32 err = IXGBE_SUCCESS;
1720 /* configure FDIRHASH register */
1721 fdirhash = input->formatted.bkt_hash;
1722 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1723 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1725 /* flush hash to HW */
1726 IXGBE_WRITE_FLUSH(hw);
1728 /* Query if filter is present */
1729 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1731 for (retry_count = 10; retry_count; retry_count--) {
1732 /* allow 10us for query to process */
1734 /* verify query completed successfully */
1735 fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1736 if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1741 err = IXGBE_ERR_FDIR_REINIT_FAILED;
1743 /* if filter exists in hardware then remove it */
1744 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1745 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1746 IXGBE_WRITE_FLUSH(hw);
1747 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1748 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1755 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1756 * @hw: pointer to hardware structure
1757 * @input: input bitstream
1758 * @input_mask: mask for the input bitstream
1759 * @soft_id: software index for the filters
1760 * @queue: queue index to direct traffic to
1762 * Note that the caller to this function must lock before calling, since the
1763 * hardware writes must be protected from one another.
1765 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1766 union ixgbe_atr_input *input,
1767 union ixgbe_atr_input *input_mask,
1768 u16 soft_id, u8 queue)
1770 s32 err = IXGBE_ERR_CONFIG;
1772 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1775 * Check flow_type formatting, and bail out before we touch the hardware
1776 * if there's a configuration issue
1778 switch (input->formatted.flow_type) {
1779 case IXGBE_ATR_FLOW_TYPE_IPV4:
1780 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
1781 if (input->formatted.dst_port || input->formatted.src_port) {
1782 DEBUGOUT(" Error on src/dst port\n");
1783 return IXGBE_ERR_CONFIG;
1786 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1787 if (input->formatted.dst_port || input->formatted.src_port) {
1788 DEBUGOUT(" Error on src/dst port\n");
1789 return IXGBE_ERR_CONFIG;
1791 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1792 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1793 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
1794 IXGBE_ATR_L4TYPE_MASK;
1797 DEBUGOUT(" Error on flow type input\n");
1801 /* program input mask into the HW */
1802 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
1806 /* apply mask and compute/store hash */
1807 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
1809 /* program filters to filter memory */
1810 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
1815 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1816 * @hw: pointer to hardware structure
1817 * @reg: analog register to read
1820 * Performs read operation to Omer analog register specified.
1822 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
1826 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
1828 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
1830 IXGBE_WRITE_FLUSH(hw);
1832 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
1833 *val = (u8)core_ctl;
1835 return IXGBE_SUCCESS;
1839 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
1840 * @hw: pointer to hardware structure
1841 * @reg: atlas register to write
1842 * @val: value to write
1844 * Performs write operation to Omer analog register specified.
1846 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
1850 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
1852 core_ctl = (reg << 8) | val;
1853 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
1854 IXGBE_WRITE_FLUSH(hw);
1857 return IXGBE_SUCCESS;
1861 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
1862 * @hw: pointer to hardware structure
1864 * Starts the hardware using the generic start_hw function
1865 * and the generation start_hw function.
1866 * Then performs revision-specific operations, if any.
1868 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1870 s32 ret_val = IXGBE_SUCCESS;
1872 DEBUGFUNC("ixgbe_start_hw_82599");
1874 ret_val = ixgbe_start_hw_generic(hw);
1875 if (ret_val != IXGBE_SUCCESS)
1878 ret_val = ixgbe_start_hw_gen2(hw);
1879 if (ret_val != IXGBE_SUCCESS)
1882 /* We need to run link autotry after the driver loads */
1883 hw->mac.autotry_restart = TRUE;
1885 if (ret_val == IXGBE_SUCCESS)
1886 ret_val = ixgbe_verify_fw_version_82599(hw);
1892 * ixgbe_identify_phy_82599 - Get physical layer module
1893 * @hw: pointer to hardware structure
1895 * Determines the physical layer module found on the current adapter.
1896 * If PHY already detected, maintains current PHY type in hw struct,
1897 * otherwise executes the PHY detection routine.
1899 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1901 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1903 DEBUGFUNC("ixgbe_identify_phy_82599");
1905 /* Detect PHY if not unknown - returns success if already detected. */
1906 status = ixgbe_identify_phy_generic(hw);
1907 if (status != IXGBE_SUCCESS) {
1908 /* 82599 10GBASE-T requires an external PHY */
1909 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1912 status = ixgbe_identify_module_generic(hw);
1915 /* Set PHY type none if no PHY detected */
1916 if (hw->phy.type == ixgbe_phy_unknown) {
1917 hw->phy.type = ixgbe_phy_none;
1918 status = IXGBE_SUCCESS;
1921 /* Return error if SFP module has been detected but is not supported */
1922 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1923 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
/**
 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration:
 * copper PHY capabilities come from the MDIO extended-ability register,
 * backplane/fiber capabilities are decoded from the AUTOC/AUTOC2 link
 * mode select and PMA/PMD fields, and SFP-based configurations are
 * resolved last by re-identifying the pluggable module.
 **/
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	/* AUTOC holds the link-mode-select and parallel PMA/PMD fields,
	 * AUTOC2 the serial PMA/PMD select */
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;
	u8 comp_codes_10g = 0;
	u8 comp_codes_1g = 0;
	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
	/* refresh PHY identification before reporting capabilities */
	hw->phy.ops.identify(hw);
	/* copper PHYs: read capabilities from the MDIO PMA/PMD
	 * extended-ability register instead of AUTOC */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
	IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
	if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
	physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
	if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
	physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
	if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
	physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
	/* non-copper: decode the link mode select field of AUTOC */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
	if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
	physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
	IXGBE_PHYSICAL_LAYER_1000BASE_BX;
	/* SFI mode so read SFP module */
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
	if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
	physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
	else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
	physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
	else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
	physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
	case IXGBE_AUTOC_LMS_10G_SERIAL:
	if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
	physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
	} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
	/* backplane auto-negotiation: report every supported lane type */
	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
	if (autoc & IXGBE_AUTOC_KX_SUPP)
	physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
	if (autoc & IXGBE_AUTOC_KX4_SUPP)
	physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
	if (autoc & IXGBE_AUTOC_KR_SUPP)
	physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
	/* SFP check must be done last since DA modules are sometimes used to
	 * test KR mode - we need to id KR mode correctly before SFP module.
	 * Call identify_sfp because the pluggable module may have changed */
	hw->phy.ops.identify_sfp(hw);
	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
	/* map the identified SFP module type to a physical layer */
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
	case ixgbe_phy_sfp_ftl_active:
	case ixgbe_phy_sfp_active_unknown:
	physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	/* optical modules: consult the SFF compliance code bytes */
	hw->phy.ops.read_i2c_eeprom(hw,
	IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
	hw->phy.ops.read_i2c_eeprom(hw,
	IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
	if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
	physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
	else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
	physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
	else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
	physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
	return physical_layer;
2050 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2051 * @hw: pointer to hardware structure
2052 * @regval: register value to write to RXCTRL
2054 * Enables the Rx DMA unit for 82599
2056 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2059 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2062 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2063 * If traffic is incoming before we enable the Rx unit, it could hang
2064 * the Rx DMA unit. Therefore, make sure the security engine is
2065 * completely disabled prior to enabling the Rx unit.
2068 hw->mac.ops.disable_sec_rx_path(hw);
2070 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2072 hw->mac.ops.enable_sec_rx_path(hw);
2074 return IXGBE_SUCCESS;
2078 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
2079 * @hw: pointer to hardware structure
2081 * Verifies that installed the firmware version is 0.6 or higher
2082 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2084 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2085 * if the FW version is not supported.
2087 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2089 s32 status = IXGBE_ERR_EEPROM_VERSION;
2090 u16 fw_offset, fw_ptp_cfg_offset;
2093 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2095 /* firmware check is only necessary for SFI devices */
2096 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2097 status = IXGBE_SUCCESS;
2098 goto fw_version_out;
2101 /* get the offset to the Firmware Module block */
2102 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2104 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2105 goto fw_version_out;
2107 /* get the offset to the Pass Through Patch Configuration block */
2108 hw->eeprom.ops.read(hw, (fw_offset +
2109 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2110 &fw_ptp_cfg_offset);
2112 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2113 goto fw_version_out;
2115 /* get the firmware version */
2116 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2117 IXGBE_FW_PATCH_VERSION_4), &fw_version);
2119 if (fw_version > 0x5)
2120 status = IXGBE_SUCCESS;
2127 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2128 * @hw: pointer to hardware structure
2130 * Returns TRUE if the LESM FW module is present and enabled. Otherwise
2131 * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2133 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2135 bool lesm_enabled = FALSE;
2136 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2139 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2141 /* get the offset to the Firmware Module block */
2142 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2144 if ((status != IXGBE_SUCCESS) ||
2145 (fw_offset == 0) || (fw_offset == 0xFFFF))
2148 /* get the offset to the LESM Parameters block */
2149 status = hw->eeprom.ops.read(hw, (fw_offset +
2150 IXGBE_FW_LESM_PARAMETERS_PTR),
2151 &fw_lesm_param_offset);
2153 if ((status != IXGBE_SUCCESS) ||
2154 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2157 /* get the lesm state word */
2158 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2159 IXGBE_FW_LESM_STATE_1),
2162 if ((status == IXGBE_SUCCESS) &&
2163 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2164 lesm_enabled = TRUE;
2167 return lesm_enabled;
2171 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2172 * fastest available method
2174 * @hw: pointer to hardware structure
2175 * @offset: offset of word in EEPROM to read
2176 * @words: number of words
2177 * @data: word(s) read from the EEPROM
2179 * Retrieves 16 bit word(s) read from EEPROM
2181 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2182 u16 words, u16 *data)
2184 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2185 s32 ret_val = IXGBE_ERR_CONFIG;
2187 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2190 * If EEPROM is detected and can be addressed using 14 bits,
2191 * use EERD otherwise use bit bang
2193 if ((eeprom->type == ixgbe_eeprom_spi) &&
2194 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2195 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2198 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2206 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2207 * fastest available method
2209 * @hw: pointer to hardware structure
2210 * @offset: offset of word in the EEPROM to read
2211 * @data: word read from the EEPROM
2213 * Reads a 16 bit word from the EEPROM
2215 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2216 u16 offset, u16 *data)
2218 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2219 s32 ret_val = IXGBE_ERR_CONFIG;
2221 DEBUGFUNC("ixgbe_read_eeprom_82599");
2224 * If EEPROM is detected and can be addressed using 14 bits,
2225 * use EERD otherwise use bit bang
2227 if ((eeprom->type == ixgbe_eeprom_spi) &&
2228 (offset <= IXGBE_EERD_MAX_ADDR))
2229 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2231 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);