1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #include "ixgbe_type.h"
36 #include "ixgbe_api.h"
37 #include "ixgbe_common.h"
38 #include "ixgbe_phy.h"
/*
 * Forward declarations for the 82599-specific MAC/PHY routines implemented
 * in this file.  Functions marked static are file-local helpers; the rest
 * are installed into the hw->mac.ops / hw->phy.ops function-pointer tables.
 *
 * NOTE(review): several prototype continuation lines appear elided in this
 * listing (e.g. the "bool *negotiation" / "bool autoneg" parameters of the
 * link-capability and link-setup prototypes) -- confirm against the full
 * source before editing signatures.
 */
40 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
41 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
42 ixgbe_link_speed *speed,
44 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
45 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
46 ixgbe_link_speed speed, bool autoneg,
47 bool autoneg_wait_to_complete);
48 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
49 ixgbe_link_speed speed, bool autoneg,
50 bool autoneg_wait_to_complete);
51 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
52 bool autoneg_wait_to_complete);
53 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
54 ixgbe_link_speed speed,
56 bool autoneg_wait_to_complete);
57 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
58 ixgbe_link_speed speed,
60 bool autoneg_wait_to_complete);
61 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
62 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
63 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
64 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
65 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
66 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
67 void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw);
68 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
69 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
70 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
71 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
72 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps);
73 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
/*
 * ixgbe_init_mac_link_ops_82599 - Select the MAC link-setup routine
 * @hw: pointer to hardware structure
 *
 * Installs mac->ops.setup_link: the dual-speed fiber routine when a
 * multispeed SFP+ module is present, the SmartSpeed routine for backplane
 * media with SmartSpeed on/auto, and the plain 82599 routine otherwise.
 *
 * NOTE(review): the brace/else lines between the assignments (original
 * lines 76/84/89/91) are elided in this listing -- the three assignments
 * are presumably mutually exclusive branches; confirm against full source.
 */
75 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
77 struct ixgbe_mac_info *mac = &hw->mac;
79 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
81 if (hw->phy.multispeed_fiber) {
82 /* Set up dual speed SFP+ support */
83 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
85 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
86 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
87 hw->phy.smart_speed == ixgbe_smart_speed_on))
88 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
/* Fallback: fixed-speed / KX4-style link setup */
90 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
95 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
96 * @hw: pointer to hardware structure
98 * Initialize any function pointers that were not able to be
99 * set during init_shared_code because the PHY/SFP type was
100 * not known. Perform the SFP init if necessary.
/*
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_SFP_NOT_SUPPORTED from
 * phy->ops.identify() when an unsupported SFP module is detected.
 */
103 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
105 struct ixgbe_mac_info *mac = &hw->mac;
106 struct ixgbe_phy_info *phy = &hw->phy;
107 s32 ret_val = IXGBE_SUCCESS;
109 DEBUGFUNC("ixgbe_init_phy_ops_82599");
111 /* Identify the PHY or SFP module */
112 ret_val = phy->ops.identify(hw);
113 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
114 goto init_phy_ops_out;
116 /* Setup function pointers based on detected SFP module and speeds */
117 ixgbe_init_mac_link_ops_82599(hw);
/* An SFP module was identified; its own reset sequence replaces the
 * generic PHY reset, so clear the reset op. */
118 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
119 hw->phy.ops.reset = NULL;
121 /* If copper media, overwrite with copper function pointers */
122 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
123 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
124 mac->ops.get_link_capabilities =
125 &ixgbe_get_copper_link_capabilities_generic;
128 /* Set necessary function pointers based on phy type */
129 switch (hw->phy.type) {
/* NOTE(review): the case labels for this switch (original lines 130/135,
 * presumably ixgbe_phy_tn and default) are elided in this listing. */
131 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
132 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
133 phy->ops.get_firmware_version =
134 &ixgbe_get_phy_firmware_version_tnx;
137 phy->ops.get_firmware_version =
138 &ixgbe_get_phy_firmware_version_generic;
/*
 * ixgbe_setup_sfp_modules_82599 - Setup an SFP+ module
 * @hw: pointer to hardware structure
 *
 * Plays the EEPROM-resident SFP init sequence into IXGBE_CORECTL (one word
 * per write, terminated by 0xffff), then restarts autonegotiation.  The
 * writes are performed under the MAC_CSR software/firmware semaphore.
 *
 * Returns IXGBE_SUCCESS, an error from the init-sequence lookup, or
 * IXGBE_ERR_SWFW_SYNC if the semaphore cannot be acquired.
 */
147 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
149 s32 ret_val = IXGBE_SUCCESS;
150 u16 list_offset, data_offset, data_value;
152 DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
154 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
155 ixgbe_init_mac_link_ops_82599(hw);
157 hw->phy.ops.reset = NULL;
/* NOTE(review): the &data_offset argument continuation (original line 160)
 * and the error-exit target after the IXGBE_SUCCESS checks (lines 162/168)
 * are elided in this listing. */
159 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
161 if (ret_val != IXGBE_SUCCESS)
164 /* PHY config will finish before releasing the semaphore */
165 ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
166 if (ret_val != IXGBE_SUCCESS) {
167 ret_val = IXGBE_ERR_SWFW_SYNC;
/* Pre-increment skips the list header word; loop until 0xffff sentinel */
171 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
172 while (data_value != 0xffff) {
173 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
174 IXGBE_WRITE_FLUSH(hw);
175 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
177 /* Now restart DSP by setting Restart_AN */
178 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
179 (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
181 /* Release the semaphore */
182 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
183 /* Delay obtaining semaphore again to allow FW access */
184 msec_delay(hw->eeprom.semaphore_delay);
192 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
193 * @hw: pointer to hardware structure
195 * Initialize the function pointers and assign the MAC type for 82599.
196 * Does not touch the hardware.
199 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
201 struct ixgbe_mac_info *mac = &hw->mac;
202 struct ixgbe_phy_info *phy = &hw->phy;
205 DEBUGFUNC("ixgbe_init_ops_82599");
/* Start from the generic PHY/MAC op tables, then override below.
 * NOTE(review): ret_val's declaration and the final return (elided here)
 * are outside this listing; only the second init's status appears kept. */
207 ret_val = ixgbe_init_phy_ops_generic(hw);
208 ret_val = ixgbe_init_ops_generic(hw);
/* PHY ops specific to 82599 */
211 phy->ops.identify = &ixgbe_identify_phy_82599;
212 phy->ops.init = &ixgbe_init_phy_ops_82599;
/* MAC ops specific to 82599 */
215 mac->ops.reset_hw = &ixgbe_reset_hw_82599;
216 mac->ops.get_media_type = &ixgbe_get_media_type_82599;
217 mac->ops.get_supported_physical_layer =
218 &ixgbe_get_supported_physical_layer_82599;
219 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
220 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
221 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
222 mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
223 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
224 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
225 mac->ops.get_device_caps = &ixgbe_get_device_caps_82599;
226 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
228 /* RAR, Multicast, VLAN */
229 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
230 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
231 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
232 mac->rar_highwater = 1;
233 mac->ops.set_vfta = &ixgbe_set_vfta_generic;
234 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
235 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
236 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
/* Link ops */
239 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
240 mac->ops.check_link = &ixgbe_check_mac_link_generic;
241 ixgbe_init_mac_link_ops_82599(hw);
/* 82599 capacity limits (entries/queues/vectors) */
243 mac->mcft_size = 128;
245 mac->num_rar_entries = 128;
246 mac->max_tx_queues = 128;
247 mac->max_rx_queues = 128;
248 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
255 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
256 * @hw: pointer to hardware structure
257 * @speed: pointer to link speed
258 * @negotiation: TRUE when autoneg or autotry is enabled
260 * Determines the link capabilities by reading the AUTOC register.
/* NOTE(review): the "bool *negotiation" parameter line, the autoc
 * declaration, the per-case "break" lines and the *negotiation = TRUE
 * assignments for the AN cases are elided in this listing -- confirm
 * against the full source. */
262 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
263 ixgbe_link_speed *speed,
266 s32 status = IXGBE_SUCCESS;
269 DEBUGFUNC("ixgbe_get_link_capabilities_82599");
274 * Determine link capabilities based on the stored value of AUTOC,
275 * which represents EEPROM defaults. If AUTOC value has not
276 * been stored, use the current register values.
278 if (hw->mac.orig_link_settings_stored)
279 autoc = hw->mac.orig_autoc;
281 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
/* Decode the Link Mode Select field into supported speeds */
283 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
284 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
285 *speed = IXGBE_LINK_SPEED_1GB_FULL;
286 *negotiation = FALSE;
289 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
290 *speed = IXGBE_LINK_SPEED_10GB_FULL;
291 *negotiation = FALSE;
294 case IXGBE_AUTOC_LMS_1G_AN:
295 *speed = IXGBE_LINK_SPEED_1GB_FULL;
299 case IXGBE_AUTOC_LMS_10G_SERIAL:
300 *speed = IXGBE_LINK_SPEED_10GB_FULL;
301 *negotiation = FALSE;
/* KR and KX4 both contribute 10Gb capability; KX contributes 1Gb */
304 case IXGBE_AUTOC_LMS_KX4_KX_KR:
305 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
306 *speed = IXGBE_LINK_SPEED_UNKNOWN;
307 if (autoc & IXGBE_AUTOC_KR_SUPP)
308 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
309 if (autoc & IXGBE_AUTOC_KX4_SUPP)
310 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
311 if (autoc & IXGBE_AUTOC_KX_SUPP)
312 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
316 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
317 *speed = IXGBE_LINK_SPEED_100_FULL;
318 if (autoc & IXGBE_AUTOC_KR_SUPP)
319 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
320 if (autoc & IXGBE_AUTOC_KX4_SUPP)
321 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
322 if (autoc & IXGBE_AUTOC_KX_SUPP)
323 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
327 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
328 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
329 *negotiation = FALSE;
/* default: unknown/invalid link mode */
333 status = IXGBE_ERR_LINK_SETUP;
/* Multispeed fiber modules can always try both 10Gb and 1Gb */
338 if (hw->phy.multispeed_fiber) {
339 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
340 IXGBE_LINK_SPEED_1GB_FULL;
349 * ixgbe_get_media_type_82599 - Get media type
350 * @hw: pointer to hardware structure
352 * Returns the media type (fiber, copper, backplane)
354 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
356 enum ixgbe_media_type media_type;
358 DEBUGFUNC("ixgbe_get_media_type_82599");
360 /* Detect if there is a copper PHY attached. */
361 if (hw->phy.type == ixgbe_phy_cu_unknown ||
362 hw->phy.type == ixgbe_phy_tn ||
363 hw->phy.type == ixgbe_phy_aq) {
364 media_type = ixgbe_media_type_copper;
/* No copper PHY: classify by PCI device ID.
 * NOTE(review): the per-case "break" lines (original 375/378/381) and
 * the early-out goto after the copper branch are elided in this listing. */
368 switch (hw->device_id) {
369 case IXGBE_DEV_ID_82599_KX4:
370 case IXGBE_DEV_ID_82599_KX4_MEZZ:
371 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
372 case IXGBE_DEV_ID_82599_XAUI_LOM:
373 /* Default device ID is mezzanine card KX/KX4 */
374 media_type = ixgbe_media_type_backplane;
376 case IXGBE_DEV_ID_82599_SFP:
377 media_type = ixgbe_media_type_fiber;
379 case IXGBE_DEV_ID_82599_CX4:
380 media_type = ixgbe_media_type_cx4;
383 media_type = ixgbe_media_type_unknown;
391 * ixgbe_start_mac_link_82599 - Setup MAC link settings
392 * @hw: pointer to hardware structure
394 * Configures link settings based on values in the ixgbe_hw struct.
395 * Restarts the link. Performs autonegotiation if needed.
/* @autoneg_wait_to_complete: when TRUE and a KX4/KX/KR autoneg link mode
 * is selected, polls LINKS.KX_AN_COMP for up to IXGBE_AUTO_NEG_TIME
 * iterations and returns IXGBE_ERR_AUTONEG_NOT_COMPLETE on timeout. */
397 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
398 bool autoneg_wait_to_complete)
403 s32 status = IXGBE_SUCCESS;
405 DEBUGFUNC("ixgbe_start_mac_link_82599");
/* Restart autonegotiation via AUTOC.AN_RESTART */
409 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
410 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
411 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
413 /* Only poll for autoneg to complete if specified to do so */
414 if (autoneg_wait_to_complete) {
415 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
416 IXGBE_AUTOC_LMS_KX4_KX_KR ||
417 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
418 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN
419 || (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
420 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
421 links_reg = 0; /* Just in case Autoneg time = 0 */
422 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
423 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
424 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
/* NOTE(review): the loop's break and per-iteration msec_delay
 * (original lines 425-426) are elided in this listing. */
428 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
429 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
430 DEBUGOUT("Autoneg did not complete.\n");
435 /* Add delay to filter out noises during initial link setup */
442 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
443 * @hw: pointer to hardware structure
444 * @speed: new link speed
445 * @autoneg: TRUE if autonegotiation enabled
446 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
448 * Set the link speed in the AUTOC register and restarts link.
/* Tries 10Gb first, then 1Gb, flapping the SFP+ tx laser (SDP3) between
 * attempts to signal the link partner to restart autotry.  If neither
 * speed links, recurses once with only the highest speed attempted. */
450 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
451 ixgbe_link_speed speed, bool autoneg,
452 bool autoneg_wait_to_complete)
454 s32 status = IXGBE_SUCCESS;
455 ixgbe_link_speed link_speed;
456 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
458 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
460 bool link_up = FALSE;
463 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
465 /* Mask off requested but non-supported speeds */
466 status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
467 if (status != IXGBE_SUCCESS)
473 * When the driver changes the link speeds that it can support,
474 * it sets autotry_restart to TRUE to indicate that we need to
475 * initiate a new autotry session with the link partner. To do
476 * so, we set the speed then disable and re-enable the tx laser, to
477 * alert the link partner that it also needs to restart autotry on its
478 * end. This is consistent with TRUE clause 37 autoneg, which also
479 * involves a loss of signal.
483 * Try each speed one by one, highest priority first. We do this in
484 * software because 10gb fiber doesn't support speed autonegotiation.
486 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
488 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
490 /* If we already have link at this speed, just jump out */
491 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
492 if (status != IXGBE_SUCCESS)
495 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
498 /* Set the module link speed */
499 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
500 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
502 /* Allow module to change analog characteristics (1G->10G) */
505 status = ixgbe_setup_mac_link_82599(
506 hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg,
507 autoneg_wait_to_complete);
508 if (status != IXGBE_SUCCESS)
511 /* Flap the tx laser if it has not already been done */
512 if (hw->mac.autotry_restart) {
513 /* Disable tx laser; allow 100us to go dark per spec */
514 esdp_reg |= IXGBE_ESDP_SDP3;
515 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
518 /* Enable tx laser; allow 2ms to light up per spec */
519 esdp_reg &= ~IXGBE_ESDP_SDP3;
520 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
523 hw->mac.autotry_restart = FALSE;
527 * Wait for the controller to acquire link. Per IEEE 802.3ap,
528 * Section 73.10.2, we may have to wait up to 500ms if KR is
529 * attempted. 82599 uses the same timing for 10g SFI.
531 for (i = 0; i < 5; i++) {
532 /* Wait for the link partner to also set speed */
535 /* If we have link, just jump out */
536 status = ixgbe_check_link(hw, &link_speed,
538 if (status != IXGBE_SUCCESS)
/* 1Gb attempt mirrors the 10Gb attempt above, with SDP5 cleared */
546 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
548 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
549 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
551 /* If we already have link at this speed, just jump out */
552 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
553 if (status != IXGBE_SUCCESS)
556 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
559 /* Set the module link speed */
560 esdp_reg &= ~IXGBE_ESDP_SDP5;
561 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
562 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
564 /* Allow module to change analog characteristics (10G->1G) */
567 status = ixgbe_setup_mac_link_82599(
568 hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
569 autoneg_wait_to_complete);
570 if (status != IXGBE_SUCCESS)
573 /* Flap the tx laser if it has not already been done */
574 if (hw->mac.autotry_restart) {
575 /* Disable tx laser; allow 100us to go dark per spec */
576 esdp_reg |= IXGBE_ESDP_SDP3;
577 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
580 /* Enable tx laser; allow 2ms to light up per spec */
581 esdp_reg &= ~IXGBE_ESDP_SDP3;
582 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
585 hw->mac.autotry_restart = FALSE;
588 /* Wait for the link partner to also set speed */
591 /* If we have link, just jump out */
592 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
593 if (status != IXGBE_SUCCESS)
601 * We didn't get link. Configure back to the highest speed we tried,
602 * (if there was more than one). We call ourselves back with just the
603 * single highest speed that the user requested.
/* NOTE(review): the usec/msec delay lines, out label and several goto
 * targets throughout this function are elided in this listing. */
606 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
607 highest_link_speed, autoneg, autoneg_wait_to_complete);
610 /* Set autoneg_advertised value based on input link speed */
611 hw->phy.autoneg_advertised = 0;
613 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
614 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
616 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
617 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
623 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
624 * @hw: pointer to hardware structure
625 * @speed: new link speed
626 * @autoneg: TRUE if autonegotiation enabled
627 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
629 * Implements the Intel SmartSpeed algorithm.
/* First attempts link with the full advertisement; if that fails and both
 * KR and KX/KX4 were advertised, disables KR (smart_speed_active) and
 * retries; finally restores full advertisement if still no link. */
631 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
632 ixgbe_link_speed speed, bool autoneg,
633 bool autoneg_wait_to_complete)
635 s32 status = IXGBE_SUCCESS;
636 ixgbe_link_speed link_speed;
638 bool link_up = FALSE;
639 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
641 DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
643 /* Set autoneg_advertised value based on input link speed */
644 hw->phy.autoneg_advertised = 0;
646 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
647 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
649 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
650 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
652 if (speed & IXGBE_LINK_SPEED_100_FULL)
653 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
656 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
657 * autoneg advertisement if link is unable to be established at the
658 * highest negotiated rate. This can sometimes happen due to integrity
659 * issues with the physical media connection.
662 /* First, try to get link with full advertisement */
663 hw->phy.smart_speed_active = FALSE;
664 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
665 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
666 autoneg_wait_to_complete);
667 if (status != IXGBE_SUCCESS)
671 * Wait for the controller to acquire link. Per IEEE 802.3ap,
672 * Section 73.10.2, we may have to wait up to 500ms if KR is
673 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
674 * Table 9 in the AN MAS.
676 for (i = 0; i < 5; i++) {
679 /* If we have link, just jump out */
680 status = ixgbe_check_link(hw, &link_speed, &link_up,
682 if (status != IXGBE_SUCCESS)
/* NOTE(review): per-iteration msec_delay lines, the "if (link_up) goto
 * out" checks, and the out label are elided in this listing. */
691 * We didn't get link. If we advertised KR plus one of KX4/KX
692 * (or BX4/BX), then disable KR and try again.
694 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
695 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
698 /* Turn SmartSpeed on to disable KR support */
699 hw->phy.smart_speed_active = TRUE;
700 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
701 autoneg_wait_to_complete);
702 if (status != IXGBE_SUCCESS)
706 * Wait for the controller to acquire link. 600ms will allow for
707 * the AN link_fail_inhibit_timer as well for multiple cycles of
708 * parallel detect, both 10g and 1g. This allows for the maximum
709 * connect attempts as defined in the AN MAS table 73-7.
711 for (i = 0; i < 6; i++) {
714 /* If we have link, just jump out */
715 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
716 if (status != IXGBE_SUCCESS)
723 /* We didn't get link. Turn SmartSpeed back off. */
724 hw->phy.smart_speed_active = FALSE;
725 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
726 autoneg_wait_to_complete);
733 * ixgbe_setup_mac_link_82599 - Set MAC link speed
734 * @hw: pointer to hardware structure
735 * @speed: new link speed
736 * @autoneg: TRUE if autonegotiation enabled
737 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
739 * Set the link speed in the AUTOC register and restarts link.
/* Masks the requested speed against hardware capabilities, reprograms
 * the AUTOC KX/KX4/KR support bits or the 1G/10G-SFI link mode as needed,
 * then restarts AN and optionally polls for completion.  Returns
 * IXGBE_ERR_LINK_SETUP when no requested speed is supported. */
741 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
742 ixgbe_link_speed speed, bool autoneg,
743 bool autoneg_wait_to_complete)
745 s32 status = IXGBE_SUCCESS;
746 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
747 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
748 u32 start_autoc = autoc;
750 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
751 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
752 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
755 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
757 DEBUGFUNC("ixgbe_setup_mac_link_82599");
759 /* Check to see if speed passed in is supported. */
760 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
761 if (status != IXGBE_SUCCESS)
764 speed &= link_capabilities;
766 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
767 status = IXGBE_ERR_LINK_SETUP;
771 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
772 if (hw->mac.orig_link_settings_stored)
773 orig_autoc = hw->mac.orig_autoc;
/* NOTE(review): the else arm assigning orig_autoc from the live AUTOC
 * read (original lines 774-775) is elided in this listing. */
777 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
778 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
779 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
780 /* Set KX4/KX/KR support according to speed requested */
781 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
782 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
783 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
784 autoc |= IXGBE_AUTOC_KX4_SUPP;
/* KR is only re-enabled when SmartSpeed has not disabled it */
785 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
786 (hw->phy.smart_speed_active == FALSE))
787 autoc |= IXGBE_AUTOC_KR_SUPP;
788 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
789 autoc |= IXGBE_AUTOC_KX_SUPP;
790 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
791 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
792 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
793 /* Switch from 1G SFI to 10G SFI if requested */
794 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
795 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
796 autoc &= ~IXGBE_AUTOC_LMS_MASK;
797 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
799 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
800 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
801 /* Switch from 10G SFI to 1G SFI if requested */
802 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
803 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
804 autoc &= ~IXGBE_AUTOC_LMS_MASK;
/* Choose AN vs no-AN 1G mode based on the autoneg flag (the
 * if/else lines around these appear elided in this listing) */
806 autoc |= IXGBE_AUTOC_LMS_1G_AN;
808 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
/* Only touch the hardware when the computed AUTOC actually changed */
812 if (autoc != start_autoc) {
815 autoc |= IXGBE_AUTOC_AN_RESTART;
816 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
818 /* Only poll for autoneg to complete if specified to do so */
819 if (autoneg_wait_to_complete) {
820 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
821 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
822 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
823 links_reg = 0; /*Just in case Autoneg time=0*/
824 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
826 IXGBE_READ_REG(hw, IXGBE_LINKS);
827 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
831 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
833 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
834 DEBUGOUT("Autoneg did not complete.\n");
839 /* Add delay to filter out noises during initial link setup */
848 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
849 * @hw: pointer to hardware structure
850 * @speed: new link speed
851 * @autoneg: TRUE if autonegotiation enabled
852 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
854 * Restarts link on PHY and MAC based on settings passed in.
/* Delegates speed setup to the external copper PHY, then restarts the
 * MAC side of the link.  NOTE(review): the "bool autoneg" parameter line
 * and the status declaration/return are elided in this listing. */
856 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
857 ixgbe_link_speed speed,
859 bool autoneg_wait_to_complete)
863 DEBUGFUNC("ixgbe_setup_copper_link_82599");
865 /* Setup the PHY according to input speed */
866 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
867 autoneg_wait_to_complete);
/* Set up MAC (restart AN / poll for completion as requested) */
869 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
874 * ixgbe_reset_hw_82599 - Perform hardware reset
875 * @hw: pointer to hardware structure
877 * Resets the hardware by resetting the transmit and receive units, masks
878 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
/* ...reset.  Afterwards restores AUTOC/AUTOC2 EEPROM defaults, re-reads
 * the permanent MAC/SAN addresses and reserves the last RAR for the SAN
 * MAC when it is valid. */
881 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
883 s32 status = IXGBE_SUCCESS;
889 DEBUGFUNC("ixgbe_reset_hw_82599");
891 /* Call adapter stop to disable tx/rx and clear interrupts */
892 hw->mac.ops.stop_adapter(hw);
894 /* PHY ops must be identified and initialized prior to reset */
896 /* Identify PHY and related function pointers */
897 status = hw->phy.ops.init(hw);
899 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
902 /* Setup SFP module if there is one present. */
903 if (hw->phy.sfp_setup_needed) {
904 status = hw->mac.ops.setup_sfp(hw);
905 hw->phy.sfp_setup_needed = FALSE;
908 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
/* NOTE(review): the reset_hw_out goto targets for the two checks above
 * are elided in this listing. */
912 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
913 hw->phy.ops.reset(hw);
916 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
917 * access and verify no pending requests before reset
919 ixgbe_disable_pcie_master(hw);
923 * Issue global reset to the MAC. This needs to be a SW reset.
924 * If link reset is used, it might reset the MAC when mng is using it
926 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
927 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
928 IXGBE_WRITE_FLUSH(hw);
930 /* Poll for reset bit to self-clear indicating reset is complete */
931 for (i = 0; i < 10; i++) {
933 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
934 if (!(ctrl & IXGBE_CTRL_RST))
937 if (ctrl & IXGBE_CTRL_RST) {
938 status = IXGBE_ERR_RESET_FAILED;
939 DEBUGOUT("Reset polling failed to complete.\n");
943 * Double resets are required for recovery from certain error
944 * conditions. Between resets, it is necessary to stall to allow time
945 * for any pending HW events to complete. We use 1usec since that is
946 * what is needed for ixgbe_disable_pcie_master(). The second reset
947 * then clears out any effects of those events.
949 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
950 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
/* NOTE(review): the usec_delay and goto-retry of the second reset
 * (original lines ~951-953) are elided in this listing. */
958 * Store the original AUTOC/AUTOC2 values if they have not been
959 * stored off yet. Otherwise restore the stored original
960 * values since the reset operation sets back to defaults.
962 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
963 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
964 if (hw->mac.orig_link_settings_stored == FALSE) {
965 hw->mac.orig_autoc = autoc;
966 hw->mac.orig_autoc2 = autoc2;
967 hw->mac.orig_link_settings_stored = TRUE;
969 if (autoc != hw->mac.orig_autoc)
970 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
971 IXGBE_AUTOC_AN_RESTART));
/* Only the "upper" (flu/pause-mask) bits of AUTOC2 are restored */
973 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
974 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
975 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
976 autoc2 |= (hw->mac.orig_autoc2 &
977 IXGBE_AUTOC2_UPPER_MASK);
978 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
982 /* Store the permanent mac address */
983 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
986 * Store MAC address from RAR0, clear receive address registers, and
987 * clear the multicast table. Also reset num_rar_entries to 128,
988 * since we modify this value when programming the SAN MAC address.
990 hw->mac.num_rar_entries = 128;
991 hw->mac.ops.init_rx_addrs(hw);
993 /* Store the permanent SAN mac address */
994 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
996 /* Add the SAN MAC address to the RAR only if it's a valid address */
997 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
998 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
999 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1001 /* Reserve the last RAR for the SAN MAC address */
1002 hw->mac.num_rar_entries--;
1005 /* Store the alternative WWNN/WWPN prefix */
1006 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1007 &hw->mac.wwpn_prefix);
1014 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1015 * @hw: pointer to hardware structure
/* Waits for any in-flight FDIRCMD command, applies the silicon-errata
 * CLEARHT toggle workaround, rewrites FDIRCTRL with INIT_DONE cleared,
 * polls for re-init completion, and clears the read-to-clear FDIR
 * statistics registers.  Returns IXGBE_ERR_FDIR_REINIT_FAILED on either
 * poll timeout. */
1017 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1020 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1021 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1023 DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1026 * Before starting reinitialization process,
1027 * FDIRCMD.CMD must be zero.
1029 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1030 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1031 IXGBE_FDIRCMD_CMD_MASK))
/* NOTE(review): the loop's break and per-iteration usec_delay lines
 * (original ~1032-1034) are elided in this listing. */
1035 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1036 DEBUGOUT("Flow Director previous command isn't complete, "
1037 "aborting table re-initialization. \n");
1038 return IXGBE_ERR_FDIR_REINIT_FAILED;
1041 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1042 IXGBE_WRITE_FLUSH(hw);
1044 * 82599 adapters flow director init flow cannot be restarted,
1045 * Workaround 82599 silicon errata by performing the following steps
1046 * before re-writing the FDIRCTRL control register with the same value.
1047 * - write 1 to bit 8 of FDIRCMD register &
1048 * - write 0 to bit 8 of FDIRCMD register
1050 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1051 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1052 IXGBE_FDIRCMD_CLEARHT));
1053 IXGBE_WRITE_FLUSH(hw);
1054 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1055 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1056 ~IXGBE_FDIRCMD_CLEARHT));
1057 IXGBE_WRITE_FLUSH(hw);
1059 * Clear FDIR Hash register to clear any leftover hashes
1060 * waiting to be programmed.
1062 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1063 IXGBE_WRITE_FLUSH(hw);
1065 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1066 IXGBE_WRITE_FLUSH(hw);
1068 /* Poll init-done after we write FDIRCTRL register */
1069 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1070 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1071 IXGBE_FDIRCTRL_INIT_DONE)
1075 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1076 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1077 return IXGBE_ERR_FDIR_REINIT_FAILED;
1080 /* Clear FDIR statistics registers (read to clear) */
1081 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1082 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1083 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1084 IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1085 IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1087 return IXGBE_SUCCESS;
1091 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1092 * @hw: pointer to hardware structure
1093 * @pballoc: which mode to allocate filters with
1095 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1101 DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1104 * Before enabling Flow Director, the Rx Packet Buffer size
1105 * must be reduced. The new value is the current size minus
1106 * flow director memory usage size.
1108 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1109 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1110 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1113 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1114 * intialized to zero for non DCB mode otherwise actual total RX PB
1115 * would be bigger than programmed and filter space would run into
1118 for (i = 1; i < 8; i++)
1119 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1121 /* Send interrupt when 64 filters are left */
1122 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1124 /* Set the maximum length per hash bucket to 0xA filters */
1125 fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
1128 case IXGBE_FDIR_PBALLOC_64K:
1129 /* 8k - 1 signature filters */
1130 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1132 case IXGBE_FDIR_PBALLOC_128K:
1133 /* 16k - 1 signature filters */
1134 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1136 case IXGBE_FDIR_PBALLOC_256K:
1137 /* 32k - 1 signature filters */
1138 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1142 return IXGBE_ERR_CONFIG;
1145 /* Move the flexible bytes to use the ethertype - shift 6 words */
1146 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1149 /* Prime the keys for hashing */
1150 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1151 IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
1152 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1153 IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
1156 * Poll init-done after we write the register. Estimated times:
1157 * 10G: PBALLOC = 11b, timing is 60us
1158 * 1G: PBALLOC = 11b, timing is 600us
1159 * 100M: PBALLOC = 11b, timing is 6ms
1161 * Multiple these timings by 4 if under full Rx load
1163 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1164 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1165 * this might not finish in our poll time, but we can live with that
1168 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1169 IXGBE_WRITE_FLUSH(hw);
1170 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1171 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1172 IXGBE_FDIRCTRL_INIT_DONE)
1176 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1177 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1179 return IXGBE_SUCCESS;
1183 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1184 * @hw: pointer to hardware structure
1185 * @pballoc: which mode to allocate filters with
1187 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1193 DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1196 * Before enabling Flow Director, the Rx Packet Buffer size
1197 * must be reduced. The new value is the current size minus
1198 * flow director memory usage size.
1201 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1202 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1203 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1206 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1207 * intialized to zero for non DCB mode otherwise actual total RX PB
1208 * would be bigger than programmed and filter space would run into
1211 for (i = 1; i < 8; i++)
1212 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1214 /* Send interrupt when 64 filters are left */
1215 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1217 /* Initialize the drop queue to Rx queue 127 */
1218 fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
1221 case IXGBE_FDIR_PBALLOC_64K:
1222 /* 2k - 1 perfect filters */
1223 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1225 case IXGBE_FDIR_PBALLOC_128K:
1226 /* 4k - 1 perfect filters */
1227 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1229 case IXGBE_FDIR_PBALLOC_256K:
1230 /* 8k - 1 perfect filters */
1231 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1235 return IXGBE_ERR_CONFIG;
1238 /* Turn perfect match filtering on */
1239 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
1240 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1242 /* Move the flexible bytes to use the ethertype - shift 6 words */
1243 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1245 /* Prime the keys for hashing */
1246 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1247 IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
1248 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1249 IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
1252 * Poll init-done after we write the register. Estimated times:
1253 * 10G: PBALLOC = 11b, timing is 60us
1254 * 1G: PBALLOC = 11b, timing is 600us
1255 * 100M: PBALLOC = 11b, timing is 6ms
1257 * Multiple these timings by 4 if under full Rx load
1259 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1260 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1261 * this might not finish in our poll time, but we can live with that
1265 /* Set the maximum length per hash bucket to 0xA filters */
1266 fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
1268 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1269 IXGBE_WRITE_FLUSH(hw);
1270 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1271 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1272 IXGBE_FDIRCTRL_INIT_DONE)
1276 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1277 DEBUGOUT("Flow Director Perfect poll time exceeded!\n");
1279 return IXGBE_SUCCESS;
1284 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
1285 * @stream: input bitstream to compute the hash on
1286 * @key: 32-bit hash key
1288 u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
1291 * The algorithm is as follows:
1292 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
1293 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
1294 * and A[n] x B[n] is bitwise AND between same length strings
1296 * K[n] is 16 bits, defined as:
1297 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
1298 * for n modulo 32 < 15, K[n] =
1299 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
1301 * S[n] is 16 bits, defined as:
1302 * for n >= 15, S[n] = S[n:n - 15]
1303 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
1305 * To simplify for programming, the algorithm is implemented
1306 * in software this way:
1308 * Key[31:0], Stream[335:0]
1310 * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
1311 * int_key[350:0] = tmp_key[351:1]
1312 * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
1315 * for (i = 0; i < 351; i++) {
1317 * hash ^= int_stream[(i + 15):i];
1327 u8 *stream = (u8 *)atr_input;
1328 u8 int_key[44]; /* upper-most bit unused */
1329 u8 hash_str[46]; /* upper-most 2 bits unused */
1330 u16 hash_result = 0;
1333 DEBUGFUNC("ixgbe_atr_compute_hash_82599");
1336 * Initialize the fill member to prevent warnings
1339 tmp_key.fill[0] = 0;
1341 /* First load the temporary key stream */
1342 for (i = 0; i < 6; i++) {
1343 u64 fillkey = ((u64)key << 32) | key;
1344 tmp_key.fill[i] = fillkey;
1348 * Set the interim key for the hashing. Bit 352 is unused, so we must
1349 * shift and compensate when building the key.
1352 int_key[0] = tmp_key.key_stream[0] >> 1;
1353 for (i = 1, j = 0; i < 44; i++) {
1354 unsigned int this_key = tmp_key.key_stream[j] << 7;
1356 int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
1360 * Set the interim bit string for the hashing. Bits 368 and 367 are
1361 * unused, so shift and compensate when building the string.
1363 hash_str[0] = (stream[40] & 0x7f) >> 1;
1364 for (i = 1, j = 40; i < 46; i++) {
1365 unsigned int this_str = stream[j] << 7;
1369 hash_str[i] = (u8)(this_str | (stream[j] >> 1));
1373 * Now compute the hash. i is the index into hash_str, j is into our
1374 * key stream, k is counting the number of bits, and h interates within
1377 for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
1378 for (h = 0; h < 8 && k < 351; h++, k++) {
1379 if (int_key[j] & (1 << h)) {
1381 * Key bit is set, XOR in the current 16-bit
1382 * string. Example of processing:
1384 * tmp = (hash_str[i - 2] & 0 << 16) |
1385 * (hash_str[i - 1] & 0xff << 8) |
1386 * (hash_str[i] & 0xff >> 0)
1387 * So tmp = hash_str[15 + k:k], since the
1388 * i + 2 clause rolls off the 16-bit value
1390 * tmp = (hash_str[i - 2] & 0x7f << 9) |
1391 * (hash_str[i - 1] & 0xff << 1) |
1392 * (hash_str[i] & 0x80 >> 7)
1394 int tmp = (hash_str[i] >> h);
1395 tmp |= (hash_str[i - 1] << (8 - h));
1396 tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
1398 hash_result ^= (u16)tmp;
1407 * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
1408 * @input: input stream to modify
1409 * @vlan: the VLAN id to load
1411 s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
1413 DEBUGFUNC("ixgbe_atr_set_vlan_id_82599");
1415 input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
1416 input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
1418 return IXGBE_SUCCESS;
1422 * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
1423 * @input: input stream to modify
1424 * @src_addr: the IP address to load
1426 s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
1428 DEBUGFUNC("ixgbe_atr_set_src_ipv4_82599");
1430 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
1431 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
1432 (src_addr >> 16) & 0xff;
1433 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
1434 (src_addr >> 8) & 0xff;
1435 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
1437 return IXGBE_SUCCESS;
1441 * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
1442 * @input: input stream to modify
1443 * @dst_addr: the IP address to load
1445 s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
1447 DEBUGFUNC("ixgbe_atr_set_dst_ipv4_82599");
1449 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
1450 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
1451 (dst_addr >> 16) & 0xff;
1452 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
1453 (dst_addr >> 8) & 0xff;
1454 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
1456 return IXGBE_SUCCESS;
1460 * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
1461 * @input: input stream to modify
1462 * @src_addr_1: the first 4 bytes of the IP address to load
1463 * @src_addr_2: the second 4 bytes of the IP address to load
1464 * @src_addr_3: the third 4 bytes of the IP address to load
1465 * @src_addr_4: the fourth 4 bytes of the IP address to load
1467 s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
1468 u32 src_addr_1, u32 src_addr_2,
1469 u32 src_addr_3, u32 src_addr_4)
1471 DEBUGFUNC("ixgbe_atr_set_src_ipv6_82599");
1473 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
1474 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
1475 (src_addr_4 >> 8) & 0xff;
1476 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
1477 (src_addr_4 >> 16) & 0xff;
1478 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
1480 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
1481 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
1482 (src_addr_3 >> 8) & 0xff;
1483 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
1484 (src_addr_3 >> 16) & 0xff;
1485 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
1487 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
1488 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
1489 (src_addr_2 >> 8) & 0xff;
1490 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
1491 (src_addr_2 >> 16) & 0xff;
1492 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
1494 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
1495 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
1496 (src_addr_1 >> 8) & 0xff;
1497 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
1498 (src_addr_1 >> 16) & 0xff;
1499 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
1501 return IXGBE_SUCCESS;
1505 * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
1506 * @input: input stream to modify
1507 * @dst_addr_1: the first 4 bytes of the IP address to load
1508 * @dst_addr_2: the second 4 bytes of the IP address to load
1509 * @dst_addr_3: the third 4 bytes of the IP address to load
1510 * @dst_addr_4: the fourth 4 bytes of the IP address to load
1512 s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
1513 u32 dst_addr_1, u32 dst_addr_2,
1514 u32 dst_addr_3, u32 dst_addr_4)
1516 DEBUGFUNC("ixgbe_atr_set_dst_ipv6_82599");
1518 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
1519 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
1520 (dst_addr_4 >> 8) & 0xff;
1521 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
1522 (dst_addr_4 >> 16) & 0xff;
1523 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
1525 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
1526 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
1527 (dst_addr_3 >> 8) & 0xff;
1528 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
1529 (dst_addr_3 >> 16) & 0xff;
1530 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
1532 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
1533 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
1534 (dst_addr_2 >> 8) & 0xff;
1535 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
1536 (dst_addr_2 >> 16) & 0xff;
1537 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
1539 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
1540 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
1541 (dst_addr_1 >> 8) & 0xff;
1542 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
1543 (dst_addr_1 >> 16) & 0xff;
1544 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
1546 return IXGBE_SUCCESS;
1550 * ixgbe_atr_set_src_port_82599 - Sets the source port
1551 * @input: input stream to modify
1552 * @src_port: the source port to load
1554 s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
1556 DEBUGFUNC("ixgbe_atr_set_src_port_82599");
1558 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
1559 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
1561 return IXGBE_SUCCESS;
1565 * ixgbe_atr_set_dst_port_82599 - Sets the destination port
1566 * @input: input stream to modify
1567 * @dst_port: the destination port to load
1569 s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
1571 DEBUGFUNC("ixgbe_atr_set_dst_port_82599");
1573 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
1574 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
1576 return IXGBE_SUCCESS;
1580 * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
1581 * @input: input stream to modify
1582 * @flex_bytes: the flexible bytes to load
1584 s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
1586 DEBUGFUNC("ixgbe_atr_set_flex_byte_82599");
1588 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
1589 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
1591 return IXGBE_SUCCESS;
1595 * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
1596 * @input: input stream to modify
1597 * @vm_pool: the Virtual Machine pool to load
1599 s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
1601 DEBUGFUNC("ixgbe_atr_set_vm_pool_82599");
1603 input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
1605 return IXGBE_SUCCESS;
1609 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
1610 * @input: input stream to modify
1611 * @l4type: the layer 4 type value to load
1613 s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
1615 DEBUGFUNC("ixgbe_atr_set_l4type_82599");
1617 input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
1619 return IXGBE_SUCCESS;
1623 * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
1624 * @input: input stream to search
1625 * @vlan: the VLAN id to load
1627 s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
1629 DEBUGFUNC("ixgbe_atr_get_vlan_id_82599");
1631 *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
1632 *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
1634 return IXGBE_SUCCESS;
1638 * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
1639 * @input: input stream to search
1640 * @src_addr: the IP address to load
1642 s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
1644 DEBUGFUNC("ixgbe_atr_get_src_ipv4_82599");
1646 *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
1647 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
1648 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
1649 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
1651 return IXGBE_SUCCESS;
1655 * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
1656 * @input: input stream to search
1657 * @dst_addr: the IP address to load
1659 s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr)
1661 DEBUGFUNC("ixgbe_atr_get_dst_ipv4_82599");
1663 *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
1664 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
1665 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
1666 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
1668 return IXGBE_SUCCESS;
1672 * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
1673 * @input: input stream to search
1674 * @src_addr_1: the first 4 bytes of the IP address to load
1675 * @src_addr_2: the second 4 bytes of the IP address to load
1676 * @src_addr_3: the third 4 bytes of the IP address to load
1677 * @src_addr_4: the fourth 4 bytes of the IP address to load
1679 s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
1680 u32 *src_addr_1, u32 *src_addr_2,
1681 u32 *src_addr_3, u32 *src_addr_4)
1683 DEBUGFUNC("ixgbe_atr_get_src_ipv6_82599");
1685 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
1686 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
1687 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
1688 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
1690 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
1691 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
1692 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
1693 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
1695 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
1696 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
1697 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
1698 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
1700 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
1701 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
1702 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
1703 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
1705 return IXGBE_SUCCESS;
1709 * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
1710 * @input: input stream to search
1711 * @dst_addr_1: the first 4 bytes of the IP address to load
1712 * @dst_addr_2: the second 4 bytes of the IP address to load
1713 * @dst_addr_3: the third 4 bytes of the IP address to load
1714 * @dst_addr_4: the fourth 4 bytes of the IP address to load
1716 s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
1717 u32 *dst_addr_1, u32 *dst_addr_2,
1718 u32 *dst_addr_3, u32 *dst_addr_4)
1720 DEBUGFUNC("ixgbe_atr_get_dst_ipv6_82599");
1722 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
1723 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
1724 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
1725 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
1727 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
1728 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
1729 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
1730 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
1732 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
1733 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
1734 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
1735 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
1737 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
1738 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
1739 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
1740 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
1742 return IXGBE_SUCCESS;
1746 * ixgbe_atr_get_src_port_82599 - Gets the source port
1747 * @input: input stream to modify
1748 * @src_port: the source port to load
1750 * Even though the input is given in big-endian, the FDIRPORT registers
1751 * expect the ports to be programmed in little-endian. Hence the need to swap
1752 * endianness when retrieving the data. This can be confusing since the
1753 * internal hash engine expects it to be big-endian.
1755 s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port)
1757 DEBUGFUNC("ixgbe_atr_get_src_port_82599");
1759 *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
1760 *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
1762 return IXGBE_SUCCESS;
1766 * ixgbe_atr_get_dst_port_82599 - Gets the destination port
1767 * @input: input stream to modify
1768 * @dst_port: the destination port to load
1770 * Even though the input is given in big-endian, the FDIRPORT registers
1771 * expect the ports to be programmed in little-endian. Hence the need to swap
1772 * endianness when retrieving the data. This can be confusing since the
1773 * internal hash engine expects it to be big-endian.
1775 s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port)
1777 DEBUGFUNC("ixgbe_atr_get_dst_port_82599");
1779 *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
1780 *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
1782 return IXGBE_SUCCESS;
1786 * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
1787 * @input: input stream to modify
1788 * @flex_bytes: the flexible bytes to load
1790 s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte)
1792 DEBUGFUNC("ixgbe_atr_get_flex_byte_82599");
1794 *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
1795 *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
1797 return IXGBE_SUCCESS;
1801 * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
1802 * @input: input stream to modify
1803 * @vm_pool: the Virtual Machine pool to load
1805 s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool)
1807 DEBUGFUNC("ixgbe_atr_get_vm_pool_82599");
1809 *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
1811 return IXGBE_SUCCESS;
1815 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
1816 * @input: input stream to modify
1817 * @l4type: the layer 4 type value to load
1819 s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
1821 DEBUGFUNC("ixgbe_atr_get_l4type__82599");
1823 *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
1825 return IXGBE_SUCCESS;
1829 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1830 * @hw: pointer to hardware structure
1831 * @stream: input bitstream
1832 * @queue: queue index to direct traffic to
1834 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1835 struct ixgbe_atr_input *input,
1841 u16 bucket_hash, sig_hash;
1844 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1846 bucket_hash = ixgbe_atr_compute_hash_82599(input,
1847 IXGBE_ATR_BUCKET_HASH_KEY);
1849 /* bucket_hash is only 15 bits */
1850 bucket_hash &= IXGBE_ATR_HASH_MASK;
1852 sig_hash = ixgbe_atr_compute_hash_82599(input,
1853 IXGBE_ATR_SIGNATURE_HASH_KEY);
1855 /* Get the l4type in order to program FDIRCMD properly */
1856 /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
1857 ixgbe_atr_get_l4type_82599(input, &l4type);
1860 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1861 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1863 fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1865 fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1866 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
1868 switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
1869 case IXGBE_ATR_L4TYPE_TCP:
1870 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1872 case IXGBE_ATR_L4TYPE_UDP:
1873 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1875 case IXGBE_ATR_L4TYPE_SCTP:
1876 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1879 DEBUGOUT(" Error on l4type input\n");
1880 return IXGBE_ERR_CONFIG;
1883 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
1884 fdircmd |= IXGBE_FDIRCMD_IPV6;
1886 fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
1887 fdirhashcmd = ((fdircmd << 32) | fdirhash);
1889 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF);
1890 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1892 return IXGBE_SUCCESS;
1896 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1897 * @hw: pointer to hardware structure
1898 * @input: input bitstream
1899 * @input_masks: masks for the input bitstream
1900 * @soft_id: software index for the filters
1901 * @queue: queue index to direct traffic to
1903 * Note that the caller to this function must lock before calling, since the
1904 * hardware writes must be protected from one another.
1906 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1907 struct ixgbe_atr_input *input,
1908 struct ixgbe_atr_input_masks *input_masks,
1909 u16 soft_id, u8 queue)
1913 u32 src_ipv4 = 0, dst_ipv4 = 0;
1914 u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
1915 u16 src_port, dst_port, vlan_id, flex_bytes;
1920 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1922 /* Get our input values */
1923 ixgbe_atr_get_l4type_82599(input, &l4type);
1926 * Check l4type formatting, and bail out before we touch the hardware
1927 * if there's a configuration issue
1929 switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
1930 case IXGBE_ATR_L4TYPE_TCP:
1931 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1933 case IXGBE_ATR_L4TYPE_UDP:
1934 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1936 case IXGBE_ATR_L4TYPE_SCTP:
1937 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1940 DEBUGOUT(" Error on l4type input\n");
1941 return IXGBE_ERR_CONFIG;
1944 bucket_hash = ixgbe_atr_compute_hash_82599(input,
1945 IXGBE_ATR_BUCKET_HASH_KEY);
1947 /* bucket_hash is only 15 bits */
1948 bucket_hash &= IXGBE_ATR_HASH_MASK;
1950 ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
1951 ixgbe_atr_get_src_port_82599(input, &src_port);
1952 ixgbe_atr_get_dst_port_82599(input, &dst_port);
1953 ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
1955 fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1957 /* Now figure out if we're IPv4 or IPv6 */
1958 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
1960 ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
1961 &src_ipv6_3, &src_ipv6_4);
1963 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
1964 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
1965 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
1966 /* The last 4 bytes is the same register as IPv4 */
1967 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
1969 fdircmd |= IXGBE_FDIRCMD_IPV6;
1970 fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
1973 ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
1974 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
1977 ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
1978 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
1980 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
1981 (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
1982 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
1983 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
1986 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1987 * are zero, then assume a full mask for that field. Also assume that
1988 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1989 * cannot be masked out in this implementation.
1991 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1995 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
1997 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
2000 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
2002 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
2004 switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
2005 case IXGBE_ATR_L4TYPE_TCP:
2007 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
2009 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
2010 input_masks->src_port_mask);
2013 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
2014 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
2017 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
2018 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
2019 (input_masks->dst_port_mask << 16)));
2021 case IXGBE_ATR_L4TYPE_UDP:
2023 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
2025 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
2026 input_masks->src_port_mask);
2029 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
2030 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
2033 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
2034 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
2035 (input_masks->src_port_mask << 16)));
2038 /* this already would have failed above */
2042 /* Program the last mask register, FDIRM */
2043 if (input_masks->vlan_id_mask || !vlan_id)
2044 /* Mask both VLAN and VLANP - bits 0 and 1 */
2045 fdirm |= (IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP);
2047 if (input_masks->data_mask || !flex_bytes)
2048 /* Flex bytes need masking, so mask the whole thing - bit 4 */
2049 fdirm |= IXGBE_FDIRM_FLEX;
2051 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
2052 fdirm |= (IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6);
2054 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
2056 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
2057 fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
2058 fdircmd |= IXGBE_FDIRCMD_LAST;
2059 fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
2060 fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
2062 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
2063 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
2065 return IXGBE_SUCCESS;
2069 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2070 * @hw: pointer to hardware structure
2071 * @reg: analog register to read
2074 * Performs read operation to Omer analog register specified.
2076 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2080 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2082 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2084 IXGBE_WRITE_FLUSH(hw);
2086 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2087 *val = (u8)core_ctl;
2089 return IXGBE_SUCCESS;
2093 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2094 * @hw: pointer to hardware structure
2095 * @reg: atlas register to write
2096 * @val: value to write
2098 * Performs write operation to Omer analog register specified.
2100 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2104 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2106 core_ctl = (reg << 8) | val;
2107 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2108 IXGBE_WRITE_FLUSH(hw);
2111 return IXGBE_SUCCESS;
2115 * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
2116 * @hw: pointer to hardware structure
2118 * Starts the hardware using the generic start_hw function.
2119 * Then performs revision-specific operations:
2120 * Clears the rate limiter registers.
2122 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
2126 s32 ret_val = IXGBE_SUCCESS;
2128 DEBUGFUNC("ixgbe_start_hw_rev_1__82599");
2130 ret_val = ixgbe_start_hw_generic(hw);
2132 /* Clear the rate limiters */
2133 for (i = 0; i < hw->mac.max_tx_queues; i++) {
2134 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
2135 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
2137 IXGBE_WRITE_FLUSH(hw);
2139 /* Disable relaxed ordering */
2140 for (i = 0; i < hw->mac.max_tx_queues; i++) {
2141 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2142 regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2143 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
2146 for (i = 0; i < hw->mac.max_rx_queues; i++) {
2147 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
2148 regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
2149 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
2150 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
2153 /* We need to run link autotry after the driver loads */
2154 hw->mac.autotry_restart = TRUE;
2156 if (ret_val == IXGBE_SUCCESS)
2157 ret_val = ixgbe_verify_fw_version_82599(hw);
2162 * ixgbe_identify_phy_82599 - Get physical layer module
2163 * @hw: pointer to hardware structure
2165 * Determines the physical layer module found on the current adapter.
2166 * If PHY already detected, maintains current PHY type in hw struct,
2167 * otherwise executes the PHY detection routine.
2169 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2171 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2173 DEBUGFUNC("ixgbe_identify_phy_82599");
2175 /* Detect PHY if not unknown - returns success if already detected. */
2176 status = ixgbe_identify_phy_generic(hw);
2177 if (status != IXGBE_SUCCESS)
2178 status = ixgbe_identify_sfp_module_generic(hw);
2179 /* Set PHY type none if no PHY detected */
2180 if (hw->phy.type == ixgbe_phy_unknown) {
2181 hw->phy.type = ixgbe_phy_none;
2182 status = IXGBE_SUCCESS;
2185 /* Return error if SFP module has been detected but is not supported */
2186 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2187 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
2193 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2194 * @hw: pointer to hardware structure
2196 * Determines physical layer capabilities of the current configuration.
2198 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2200 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2201 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2202 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2203 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2204 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2205 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2206 u16 ext_ability = 0;
2207 u8 comp_codes_10g = 0;
2209 DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
2211 hw->phy.ops.identify(hw);
2213 if (hw->phy.type == ixgbe_phy_tn ||
2214 hw->phy.type == ixgbe_phy_aq ||
2215 hw->phy.type == ixgbe_phy_cu_unknown) {
2216 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2217 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2218 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2219 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2220 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2221 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2222 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2223 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2227 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2228 case IXGBE_AUTOC_LMS_1G_AN:
2229 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2230 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2231 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2232 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2235 /* SFI mode so read SFP module */
2238 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2239 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2240 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2241 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2242 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2243 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2244 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2247 case IXGBE_AUTOC_LMS_10G_SERIAL:
2248 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2249 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2251 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2254 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2255 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2256 if (autoc & IXGBE_AUTOC_KX_SUPP)
2257 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2258 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2259 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2260 if (autoc & IXGBE_AUTOC_KR_SUPP)
2261 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2270 /* SFP check must be done last since DA modules are sometimes used to
2271 * test KR mode - we need to id KR mode correctly before SFP module.
2272 * Call identify_sfp because the pluggable module may have changed */
2273 hw->phy.ops.identify_sfp(hw);
2274 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2277 switch (hw->phy.type) {
2278 case ixgbe_phy_sfp_passive_tyco:
2279 case ixgbe_phy_sfp_passive_unknown:
2280 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2282 case ixgbe_phy_sfp_ftl_active:
2283 case ixgbe_phy_sfp_active_unknown:
2284 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2286 case ixgbe_phy_sfp_avago:
2287 case ixgbe_phy_sfp_ftl:
2288 case ixgbe_phy_sfp_intel:
2289 case ixgbe_phy_sfp_unknown:
2290 hw->phy.ops.read_i2c_eeprom(hw,
2291 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2292 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2293 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2294 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2295 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2302 return physical_layer;
2306 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2307 * @hw: pointer to hardware structure
2308 * @regval: register value to write to RXCTRL
2310 * Enables the Rx DMA unit for 82599
2312 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2314 #define IXGBE_MAX_SECRX_POLL 30
2318 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2321 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2322 * If traffic is incoming before we enable the Rx unit, it could hang
2323 * the Rx DMA unit. Therefore, make sure the security engine is
2324 * completely disabled prior to enabling the Rx unit.
2326 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2327 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2328 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2329 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2330 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2331 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2334 /* Use interrupt-safe sleep just in case */
2338 /* For informational purposes only */
2339 if (i >= IXGBE_MAX_SECRX_POLL)
2340 DEBUGOUT("Rx unit being enabled before security "
2341 "path fully disabled. Continuing with init.\n");
2343 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2344 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2345 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2346 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2347 IXGBE_WRITE_FLUSH(hw);
2349 return IXGBE_SUCCESS;
2353 * ixgbe_get_device_caps_82599 - Get additional device capabilities
2354 * @hw: pointer to hardware structure
2355 * @device_caps: the EEPROM word with the extra device capabilities
2357 * This function will read the EEPROM location for the device capabilities,
2358 * and return the word through device_caps.
2360 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
2362 DEBUGFUNC("ixgbe_get_device_caps_82599");
2364 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
2366 return IXGBE_SUCCESS;
2370 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
2371 * @hw: pointer to hardware structure
2373 * Verifies that installed the firmware version is 0.6 or higher
2374 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2376 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2377 * if the FW version is not supported.
2379 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2381 s32 status = IXGBE_ERR_EEPROM_VERSION;
2382 u16 fw_offset, fw_ptp_cfg_offset;
2385 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2387 /* firmware check is only necessary for SFI devices */
2388 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2389 status = IXGBE_SUCCESS;
2390 goto fw_version_out;
2393 /* get the offset to the Firmware Module block */
2394 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2396 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2397 goto fw_version_out;
2399 /* get the offset to the Pass Through Patch Configuration block */
2400 hw->eeprom.ops.read(hw, (fw_offset +
2401 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2402 &fw_ptp_cfg_offset);
2404 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2405 goto fw_version_out;
2407 /* get the firmware version */
2408 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2409 IXGBE_FW_PATCH_VERSION_4),
2412 if (fw_version > 0x5)
2413 status = IXGBE_SUCCESS;
2419 * ixgbe_enable_relaxed_ordering_82599 - Enable relaxed ordering
2420 * @hw: pointer to hardware structure
2423 void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw)
2428 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82599");
2430 /* Enable relaxed ordering */
2431 for (i = 0; i < hw->mac.max_tx_queues; i++) {
2432 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2433 regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2434 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
2437 for (i = 0; i < hw->mac.max_rx_queues; i++) {
2438 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
2439 regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
2440 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
2441 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);