1 /******************************************************************************
3 Copyright (c) 2001-2009, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #include "ixgbe_type.h"
36 #include "ixgbe_api.h"
37 #include "ixgbe_common.h"
38 #include "ixgbe_phy.h"
40 u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw);
41 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
42 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
43 ixgbe_link_speed *speed,
45 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
46 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw);
47 s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
48 ixgbe_link_speed speed, bool autoneg,
49 bool autoneg_wait_to_complete);
50 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw);
51 s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw,
52 ixgbe_link_speed *speed,
53 bool *link_up, bool link_up_wait_to_complete);
54 s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
55 ixgbe_link_speed speed,
57 bool autoneg_wait_to_complete);
58 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw);
59 static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw,
60 ixgbe_link_speed speed,
62 bool autoneg_wait_to_complete);
63 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
64 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
65 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
66 s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
67 s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
68 s32 ixgbe_insert_mac_addr_82599(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
69 s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan,
70 u32 vind, bool vlan_on);
71 s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw);
72 s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw);
73 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
74 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
75 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
76 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
77 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
78 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
79 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
80 s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
82 s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr);
83 s32 ixgbe_set_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr);
84 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps);
85 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
/*
 * ixgbe_init_mac_link_ops_82599 - Initialize MAC link function pointers.
 * @hw: pointer to hardware structure
 *
 * Installs the multispeed-fiber setup_link/setup_link_speed handlers when a
 * dual-speed SFP+ module was detected (hw->phy.multispeed_fiber), otherwise
 * the plain 82599 link handlers.
 *
 * NOTE(review): this listing has gaps (the embedded original line numbers
 * skip). The opening brace, the "mac->ops.setup_link =" assignment preceding
 * the multispeed handler, the "} else {" between the two arms, and the
 * closing braces are not visible here -- verify against the complete file.
 */
88 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
90 struct ixgbe_mac_info *mac = &hw->mac;
92 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
94 if (hw->phy.multispeed_fiber) {
95 /* Set up dual speed SFP+ support */
97 &ixgbe_setup_mac_link_multispeed_fiber;
98 mac->ops.setup_link_speed =
99 &ixgbe_setup_mac_link_speed_multispeed_fiber;
101 mac->ops.setup_link =
102 &ixgbe_setup_mac_link_82599;
103 mac->ops.setup_link_speed =
104 &ixgbe_setup_mac_link_speed_82599;
/*
 * NOTE(review): the kernel-doc opener for the block below and several body
 * lines are missing from this listing (line numbering skips): the opening
 * brace, the closing brace of the copper-media "if", the switch "case"
 * labels (presumably ixgbe_phy_tn / ixgbe_phy_aq -- TODO confirm), the
 * "break" statements, the "init_phy_ops_out" label, and the final return.
 */
109 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
110 * @hw: pointer to hardware structure
112 * Initialize any function pointers that were not able to be
113 * set during init_shared_code because the PHY/SFP type was
114 * not known. Perform the SFP init if necessary.
117 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
119 struct ixgbe_mac_info *mac = &hw->mac;
120 struct ixgbe_phy_info *phy = &hw->phy;
121 s32 ret_val = IXGBE_SUCCESS;
123 DEBUGFUNC("ixgbe_init_phy_ops_82599");
125 /* Identify the PHY or SFP module */
126 ret_val = phy->ops.identify(hw);
127 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
128 goto init_phy_ops_out;
130 /* Setup function pointers based on detected SFP module and speeds */
131 ixgbe_init_mac_link_ops_82599(hw);
132 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
133 hw->phy.ops.reset = NULL;
135 /* If copper media, overwrite with copper function pointers */
136 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
137 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
138 mac->ops.setup_link_speed =
139 &ixgbe_setup_copper_link_speed_82599;
140 mac->ops.get_link_capabilities =
141 &ixgbe_get_copper_link_capabilities_generic;
144 /* Set necessary function pointers based on phy type */
145 switch (hw->phy.type) {
147 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
148 phy->ops.get_firmware_version =
149 &ixgbe_get_phy_firmware_version_tnx;
152 phy->ops.get_firmware_version =
153 &ixgbe_get_phy_firmware_version_aq;
/*
 * ixgbe_setup_sfp_modules_82599 - Configure a detected SFP+ module.
 * @hw: pointer to hardware structure
 *
 * Replays the EEPROM-resident SFP init sequence into CORECTL under the
 * MAC_CSR software/firmware semaphore, then restarts autonegotiation.
 * Returns an ixgbe status code (IXGBE_ERR_SWFW_SYNC if the semaphore
 * cannot be acquired).
 *
 * NOTE(review): listing gaps here hide the opening brace, the second
 * argument line of ixgbe_get_sfp_init_sequence_offsets (presumably
 * &data_offset -- TODO confirm), the error-path goto targets, the closing
 * brace of the while loop, and the function's exit label/return.
 */
162 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
164 s32 ret_val = IXGBE_SUCCESS;
165 u16 list_offset, data_offset, data_value;
167 DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
169 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
170 ixgbe_init_mac_link_ops_82599(hw);
172 hw->phy.ops.reset = NULL;
174 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
176 if (ret_val != IXGBE_SUCCESS)
179 /* PHY config will finish before releasing the semaphore */
180 ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
181 if (ret_val != IXGBE_SUCCESS) {
182 ret_val = IXGBE_ERR_SWFW_SYNC;
/* Pre-increment skips to the first data word, then loops until the
 * 0xffff terminator, writing each word to CORECTL. */
186 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
187 while (data_value != 0xffff) {
188 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
189 IXGBE_WRITE_FLUSH(hw);
190 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
192 /* Now restart DSP by setting Restart_AN */
193 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
194 (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
196 /* Release the semaphore */
197 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
198 /* Delay obtaining semaphore again to allow FW access */
199 msec_delay(hw->eeprom.semaphore_delay);
/*
 * NOTE(review): the kernel-doc opener, the msix_count declaration, the
 * zero-based-count increment, the fallback when msix_vectors_from_pcie is
 * false, and the return statement are all in listing gaps -- verify against
 * the complete file.
 */
207 * ixgbe_get_pcie_msix_count_82599 - Gets MSI-X vector count
208 * @hw: pointer to hardware structure
210 * Read PCIe configuration space, and get the MSI-X vector count from
211 * the capabilities table.
213 u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
217 if (hw->mac.msix_vectors_from_pcie) {
218 msix_count = IXGBE_READ_PCIE_WORD(hw,
219 IXGBE_PCIE_MSIX_82599_CAPS);
/* Table size field is in the low bits of the MSI-X capability word. */
220 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
222 /* MSI-X count is zero-based in HW, so increment to give
/*
 * NOTE(review): kernel-doc opener, opening brace, the ret_val declaration,
 * and the final return are in listing gaps. Also note that ret_val from
 * ixgbe_init_phy_ops_generic() is immediately overwritten by the
 * ixgbe_init_ops_generic() result -- the first call's status is discarded
 * as written; confirm this matches upstream intent.
 */
231 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
232 * @hw: pointer to hardware structure
234 * Initialize the function pointers and assign the MAC type for 82599.
235 * Does not touch the hardware.
238 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
240 struct ixgbe_mac_info *mac = &hw->mac;
241 struct ixgbe_phy_info *phy = &hw->phy;
244 ret_val = ixgbe_init_phy_ops_generic(hw);
245 ret_val = ixgbe_init_ops_generic(hw);
/* PHY entry points specific to 82599 */
248 phy->ops.identify = &ixgbe_identify_phy_82599;
249 phy->ops.init = &ixgbe_init_phy_ops_82599;
/* MAC entry points specific to 82599 */
252 mac->ops.reset_hw = &ixgbe_reset_hw_82599;
253 mac->ops.get_media_type = &ixgbe_get_media_type_82599;
254 mac->ops.get_supported_physical_layer =
255 &ixgbe_get_supported_physical_layer_82599;
256 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
257 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
258 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
259 mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
260 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_82599;
261 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_82599;
262 mac->ops.get_device_caps = &ixgbe_get_device_caps_82599;
264 /* RAR, Multicast, VLAN */
265 mac->ops.set_vmdq = &ixgbe_set_vmdq_82599;
266 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82599;
267 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_82599;
268 mac->rar_highwater = 1;
269 mac->ops.set_vfta = &ixgbe_set_vfta_82599;
270 mac->ops.clear_vfta = &ixgbe_clear_vfta_82599;
271 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_82599;
272 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
/* Link handling: default ops, then refine based on detected media */
275 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
276 mac->ops.check_link = &ixgbe_check_mac_link_82599;
277 ixgbe_init_mac_link_ops_82599(hw);
/* 82599 hardware limits (entries / queues / vectors) */
279 mac->mcft_size = 128;
281 mac->num_rar_entries = 128;
282 mac->max_tx_queues = 128;
283 mac->max_rx_queues = 128;
284 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
/*
 * NOTE(review): kernel-doc opener, the third parameter line of the
 * signature (presumably "bool *negotiation" -- the body writes through
 * *negotiation), the autoc declaration, the "else" of the stored-settings
 * test, the per-case "break" statements, several "*negotiation = TRUE"
 * lines, the default-case break, and the exit/return are in listing gaps.
 */
291 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
292 * @hw: pointer to hardware structure
293 * @speed: pointer to link speed
294 * @negotiation: TRUE when autoneg or autotry is enabled
296 * Determines the link capabilities by reading the AUTOC register.
298 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
299 ixgbe_link_speed *speed,
302 s32 status = IXGBE_SUCCESS;
306 * Determine link capabilities based on the stored value of AUTOC,
307 * which represents EEPROM defaults. If AUTOC value has not
308 * been stored, use the current register values.
310 if (hw->mac.orig_link_settings_stored)
311 autoc = hw->mac.orig_autoc;
313 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
/* Decode the Link Mode Select field of AUTOC */
315 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
316 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
317 *speed = IXGBE_LINK_SPEED_1GB_FULL;
318 *negotiation = FALSE;
321 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
322 *speed = IXGBE_LINK_SPEED_10GB_FULL;
323 *negotiation = FALSE;
326 case IXGBE_AUTOC_LMS_1G_AN:
327 *speed = IXGBE_LINK_SPEED_1GB_FULL;
331 case IXGBE_AUTOC_LMS_10G_SERIAL:
332 *speed = IXGBE_LINK_SPEED_10GB_FULL;
333 *negotiation = FALSE;
336 case IXGBE_AUTOC_LMS_KX4_KX_KR:
337 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
338 *speed = IXGBE_LINK_SPEED_UNKNOWN;
/* KR and KX4 are both 10G backplane modes; KX is 1G */
339 if (autoc & IXGBE_AUTOC_KR_SUPP)
340 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
341 if (autoc & IXGBE_AUTOC_KX4_SUPP)
342 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
343 if (autoc & IXGBE_AUTOC_KX_SUPP)
344 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
348 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
349 *speed = IXGBE_LINK_SPEED_100_FULL;
350 if (autoc & IXGBE_AUTOC_KR_SUPP)
351 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
352 if (autoc & IXGBE_AUTOC_KX4_SUPP)
353 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
354 if (autoc & IXGBE_AUTOC_KX_SUPP)
355 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
359 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
360 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
361 *negotiation = FALSE;
365 status = IXGBE_ERR_LINK_SETUP;
/* Multispeed fiber modules can always try both rates */
370 if (hw->phy.multispeed_fiber) {
371 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
372 IXGBE_LINK_SPEED_1GB_FULL;
/*
 * NOTE(review): kernel-doc opener, opening brace, the "goto out" (or
 * equivalent) after the copper check, the per-case "break" statements, and
 * the exit label/return are in listing gaps. Also note IXGBE_DEV_ID_82599_CX4
 * is classified as fiber media as written -- confirm intended.
 */
381 * ixgbe_get_media_type_82599 - Get media type
382 * @hw: pointer to hardware structure
384 * Returns the media type (fiber, copper, backplane)
386 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
388 enum ixgbe_media_type media_type;
390 /* Detect if there is a copper PHY attached. */
391 if (hw->phy.type == ixgbe_phy_cu_unknown ||
392 hw->phy.type == ixgbe_phy_tn ||
393 hw->phy.type == ixgbe_phy_aq) {
394 media_type = ixgbe_media_type_copper;
/* Otherwise classify by device ID */
398 switch (hw->device_id) {
399 case IXGBE_DEV_ID_82599_KX4:
400 case IXGBE_DEV_ID_82599_XAUI_LOM:
401 /* Default device ID is mezzanine card KX/KX4 */
402 media_type = ixgbe_media_type_backplane;
404 case IXGBE_DEV_ID_82599_SFP:
405 media_type = ixgbe_media_type_fiber;
407 case IXGBE_DEV_ID_82599_CX4:
408 media_type = ixgbe_media_type_fiber;
411 media_type = ixgbe_media_type_unknown;
/*
 * NOTE(review): kernel-doc opener, the autoc_reg/links_reg/i declarations,
 * the loop's "break" and msec_delay, the closing braces, the trailing delay
 * after "filter out noises", and the return are in listing gaps.
 */
419 * ixgbe_setup_mac_link_82599 - Setup MAC link settings
420 * @hw: pointer to hardware structure
422 * Configures link settings based on values in the ixgbe_hw struct.
423 * Restarts the link. Performs autonegotiation if needed.
425 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw)
430 s32 status = IXGBE_SUCCESS;
/* Restart autonegotiation by setting AN_RESTART in AUTOC */
434 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
435 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
436 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
438 /* Only poll for autoneg to complete if specified to do so */
439 if (hw->phy.autoneg_wait_to_complete) {
440 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
441 IXGBE_AUTOC_LMS_KX4_KX_KR ||
442 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
443 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN
444 || (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
445 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
446 links_reg = 0; /* Just in case Autoneg time = 0 */
447 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
448 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
449 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
453 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
454 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
455 DEBUGOUT("Autoneg did not complete.\n");
460 /* Add delay to filter out noises during initial link setup */
/*
 * NOTE(review): kernel-doc opener, opening brace, "return status;" and
 * closing brace are in listing gaps. Also flag: the call below mixes the
 * file's TRUE macro with a lowercase "true" for the two bool arguments --
 * should be TRUE, TRUE for consistency with the rest of this file (a code
 * change; not applied in this comment-only pass).
 */
467 * ixgbe_setup_mac_link_multispeed_fiber - Setup MAC link settings
468 * @hw: pointer to hardware structure
470 * Configures link settings based on values in the ixgbe_hw struct.
471 * Restarts the link for multi-speed fiber at 1G speed, if link
473 * Performs autonegotiation if needed.
475 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw)
477 s32 status = IXGBE_SUCCESS;
478 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_82599_AUTONEG;
479 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
481 status = ixgbe_setup_mac_link_speed_multispeed_fiber(hw,
482 link_speed, TRUE, true);
/*
 * NOTE(review): this long function has many listing gaps: kernel-doc
 * opener, opening brace, the speedcnt/i/negotiation declarations, several
 * msec_delay calls (laser off/on timing, link-partner wait), "goto out"
 * error paths, the 1G ESDP SDP5 clear comment block, inner closing braces,
 * the "out:" label and final return. Do not assume the visible lines are
 * contiguous.
 */
487 * ixgbe_setup_mac_link_speed_multispeed_fiber - Set MAC link speed
488 * @hw: pointer to hardware structure
489 * @speed: new link speed
490 * @autoneg: TRUE if autonegotiation enabled
491 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
493 * Set the link speed in the AUTOC register and restarts link.
495 s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
496 ixgbe_link_speed speed, bool autoneg,
497 bool autoneg_wait_to_complete)
499 s32 status = IXGBE_SUCCESS;
500 ixgbe_link_speed link_speed;
501 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
503 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
505 bool link_up = FALSE;
508 /* Mask off requested but non-supported speeds */
509 status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
510 if (status != IXGBE_SUCCESS)
515 /* Set autoneg_advertised value based on input link speed */
516 hw->phy.autoneg_advertised = 0;
518 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
519 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
521 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
522 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
525 * When the driver changes the link speeds that it can support,
526 * it sets autotry_restart to TRUE to indicate that we need to
527 * initiate a new autotry session with the link partner. To do
528 * so, we set the speed then disable and re-enable the tx laser, to
529 * alert the link partner that it also needs to restart autotry on its
530 * end. This is consistent with TRUE clause 37 autoneg, which also
531 * involves a loss of signal.
535 * Try each speed one by one, highest priority first. We do this in
536 * software because 10gb fiber doesn't support speed autonegotiation.
538 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
540 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
542 /* If we already have link at this speed, just jump out */
543 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
544 if (status != IXGBE_SUCCESS)
547 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
550 /* Set the module link speed */
551 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
552 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
554 /* Allow module to change analog characteristics (1G->10G) */
557 status = ixgbe_setup_mac_link_speed_82599(
558 hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg,
559 autoneg_wait_to_complete);
560 if (status != IXGBE_SUCCESS)
563 /* Flap the tx laser if it has not already been done */
564 if (hw->mac.autotry_restart) {
565 /* Disable tx laser; allow 100us to go dark per spec */
566 esdp_reg |= IXGBE_ESDP_SDP3;
567 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
570 /* Enable tx laser; allow 2ms to light up per spec */
571 esdp_reg &= ~IXGBE_ESDP_SDP3;
572 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
575 hw->mac.autotry_restart = FALSE;
578 /* The controller may take up to 500ms at 10g to acquire link */
579 for (i = 0; i < 5; i++) {
580 /* Wait for the link partner to also set speed */
583 /* If we have link, just jump out */
584 status = ixgbe_check_link(hw, &link_speed,
586 if (status != IXGBE_SUCCESS)
594 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
596 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
597 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
599 /* If we already have link at this speed, just jump out */
600 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
601 if (status != IXGBE_SUCCESS)
604 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
607 /* Set the module link speed */
608 esdp_reg &= ~IXGBE_ESDP_SDP5;
609 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
610 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
612 /* Allow module to change analog characteristics (10G->1G) */
615 status = ixgbe_setup_mac_link_speed_82599(
616 hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
617 autoneg_wait_to_complete);
618 if (status != IXGBE_SUCCESS)
621 /* Flap the tx laser if it has not already been done */
622 if (hw->mac.autotry_restart) {
623 /* Disable tx laser; allow 100us to go dark per spec */
624 esdp_reg |= IXGBE_ESDP_SDP3;
625 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
628 /* Enable tx laser; allow 2ms to light up per spec */
629 esdp_reg &= ~IXGBE_ESDP_SDP3;
630 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
633 hw->mac.autotry_restart = FALSE;
636 /* Wait for the link partner to also set speed */
639 /* If we have link, just jump out */
640 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
641 if (status != IXGBE_SUCCESS)
649 * We didn't get link. Configure back to the highest speed we tried,
650 * (if there was more than one). We call ourselves back with just the
651 * single highest speed that the user requested.
/* NOTE(review): recursion below is bounded to one level because the
 * retry requests a single speed; the guard condition (presumably
 * "if (speed != highest_link_speed)" -- TODO confirm) is in a gap. */
654 status = ixgbe_setup_mac_link_speed_multispeed_fiber(hw,
655 highest_link_speed, autoneg, autoneg_wait_to_complete);
/*
 * NOTE(review): kernel-doc opener, opening brace, the links_reg/i
 * declarations, the "*link_up = TRUE/FALSE" assignments inside the polling
 * loop and the non-waiting path, the msec_delay in the loop, and the
 * "else" branches of the speed decode are in listing gaps.
 */
662 * ixgbe_check_mac_link_82599 - Determine link and speed status
663 * @hw: pointer to hardware structure
664 * @speed: pointer to link speed
665 * @link_up: TRUE when link is up
666 * @link_up_wait_to_complete: bool used to wait for link up or not
668 * Reads the links register to determine if link is up and the current speed
670 s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
671 bool *link_up, bool link_up_wait_to_complete)
676 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
677 if (link_up_wait_to_complete) {
678 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
679 if (links_reg & IXGBE_LINKS_UP) {
686 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
689 if (links_reg & IXGBE_LINKS_UP)
/* Decode the 82599 LINKS speed field into the driver's speed enum */
695 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
696 IXGBE_LINKS_SPEED_10G_82599)
697 *speed = IXGBE_LINK_SPEED_10GB_FULL;
698 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
699 IXGBE_LINKS_SPEED_1G_82599)
700 *speed = IXGBE_LINK_SPEED_1GB_FULL;
702 *speed = IXGBE_LINK_SPEED_100_FULL;
704 /* if link is down, zero out the current_mode */
705 if (*link_up == FALSE) {
706 hw->fc.current_mode = ixgbe_fc_none;
707 hw->fc.fc_was_autonegged = FALSE;
710 return IXGBE_SUCCESS;
/*
 * NOTE(review): listing gaps hide the kernel-doc opener, opening brace,
 * the links_reg/i/orig_autoc declarations, the "else orig_autoc = autoc"
 * fallback, "goto out" error paths, loop delay/break lines, closing braces,
 * the "out:" label and final return. The "1G SFI" else-branch at original
 * line 784/786 also straddles a gap (its selecting condition is missing).
 */
714 * ixgbe_setup_mac_link_speed_82599 - Set MAC link speed
715 * @hw: pointer to hardware structure
716 * @speed: new link speed
717 * @autoneg: TRUE if autonegotiation enabled
718 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
720 * Set the link speed in the AUTOC register and restarts link.
722 s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
723 ixgbe_link_speed speed, bool autoneg,
724 bool autoneg_wait_to_complete)
726 s32 status = IXGBE_SUCCESS;
727 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
728 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
729 u32 start_autoc = autoc;
731 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
732 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
733 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
736 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
738 /* Check to see if speed passed in is supported. */
739 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
740 if (status != IXGBE_SUCCESS)
743 speed &= link_capabilities;
745 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
746 status = IXGBE_ERR_LINK_SETUP;
750 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
751 if (hw->mac.orig_link_settings_stored)
752 orig_autoc = hw->mac.orig_autoc;
756 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
757 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
758 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
759 /* Set KX4/KX/KR support according to speed requested */
760 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
761 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
762 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
763 autoc |= IXGBE_AUTOC_KX4_SUPP;
764 if (orig_autoc & IXGBE_AUTOC_KR_SUPP)
765 autoc |= IXGBE_AUTOC_KR_SUPP;
766 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
767 autoc |= IXGBE_AUTOC_KX_SUPP;
768 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
769 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
770 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
771 /* Switch from 1G SFI to 10G SFI if requested */
772 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
773 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
774 autoc &= ~IXGBE_AUTOC_LMS_MASK;
775 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
777 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
778 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
779 /* Switch from 10G SFI to 1G SFI if requested */
780 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
781 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
782 autoc &= ~IXGBE_AUTOC_LMS_MASK;
784 autoc |= IXGBE_AUTOC_LMS_1G_AN;
786 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
/* Only touch hardware if the computed AUTOC actually changed */
790 if (autoc != start_autoc) {
793 autoc |= IXGBE_AUTOC_AN_RESTART;
794 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
796 /* Only poll for autoneg to complete if specified to do so */
797 if (autoneg_wait_to_complete) {
798 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
799 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
800 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
801 links_reg = 0; /*Just in case Autoneg time=0*/
802 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
804 IXGBE_READ_REG(hw, IXGBE_LINKS);
805 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
809 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
811 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
812 DEBUGOUT("Autoneg did not complete.\n");
817 /* Add delay to filter out noises during initial link setup */
/*
 * NOTE(review): kernel-doc opener, opening brace, the status declaration,
 * the "Set up MAC" comment, and "return status;" are in listing gaps. As
 * visible, the ixgbe_setup_mac_link_82599() return value is discarded and
 * only the PHY setup status is propagated -- confirm against upstream.
 */
826 * ixgbe_setup_copper_link_82599 - Setup copper link settings
827 * @hw: pointer to hardware structure
829 * Restarts the link on PHY and then MAC. Performs autonegotiation if needed.
831 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw)
835 /* Restart autonegotiation on PHY */
836 status = hw->phy.ops.setup_link(hw);
839 ixgbe_setup_mac_link_82599(hw);
/*
 * NOTE(review): kernel-doc opener, the "bool autoneg" parameter line, the
 * opening brace, the status declaration, and "return status;" are in
 * listing gaps. As with the non-speed variant, the MAC-side setup return
 * is discarded as visible.
 */
845 * ixgbe_setup_copper_link_speed_82599 - Set the PHY autoneg advertised field
846 * @hw: pointer to hardware structure
847 * @speed: new link speed
848 * @autoneg: TRUE if autonegotiation enabled
849 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
851 * Restarts link on PHY and MAC based on settings passed in.
853 static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw,
854 ixgbe_link_speed speed,
856 bool autoneg_wait_to_complete)
860 /* Setup the PHY according to input speed */
861 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
862 autoneg_wait_to_complete);
864 ixgbe_setup_mac_link_82599(hw);
/*
 * NOTE(review): listing gaps hide the kernel-doc opener, opening brace,
 * the ctrl/ctrl_ext/autoc/autoc2/i declarations, the "goto reset_hw_out"
 * error paths, the usec_delay in the reset poll, the msec_delay after
 * reset, the "else" of the orig-settings branch, closing braces, the SAN
 * MAC "Reserve the last RAR" closing, the exit label and return.
 * The duplicated word in the original "from from" comment is fixed below.
 */
869 * ixgbe_reset_hw_82599 - Perform hardware reset
870 * @hw: pointer to hardware structure
872 * Resets the hardware by resetting the transmit and receive units, masks
873 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
876 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
878 s32 status = IXGBE_SUCCESS;
884 /* Call adapter stop to disable tx/rx and clear interrupts */
885 hw->mac.ops.stop_adapter(hw);
887 /* PHY ops must be identified and initialized prior to reset */
889 /* Identify PHY and related function pointers */
890 status = hw->phy.ops.init(hw);
892 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
896 /* Setup SFP module if there is one present. */
897 if (hw->phy.sfp_setup_needed) {
898 status = hw->mac.ops.setup_sfp(hw);
899 hw->phy.sfp_setup_needed = FALSE;
902 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
/* Reset PHY unless it is explicitly disabled or has no reset op */
906 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
907 hw->phy.ops.reset(hw);
910 * Prevent the PCI-E bus from hanging by disabling PCI-E master
911 * access and verify no pending requests before reset
913 status = ixgbe_disable_pcie_master(hw);
914 if (status != IXGBE_SUCCESS) {
915 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
916 DEBUGOUT("PCI-E Master disable polling has failed.\n");
920 * Issue global reset to the MAC. This needs to be a SW reset.
921 * If link reset is used, it might reset the MAC when mng is using it
923 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
924 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
925 IXGBE_WRITE_FLUSH(hw);
927 /* Poll for reset bit to self-clear indicating reset is complete */
928 for (i = 0; i < 10; i++) {
930 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
931 if (!(ctrl & IXGBE_CTRL_RST))
934 if (ctrl & IXGBE_CTRL_RST) {
935 status = IXGBE_ERR_RESET_FAILED;
936 DEBUGOUT("Reset polling failed to complete.\n");
938 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
939 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
940 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
941 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
948 * Store the original AUTOC/AUTOC2 values if they have not been
949 * stored off yet. Otherwise restore the stored original
950 * values since the reset operation sets back to defaults.
952 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
953 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
954 if (hw->mac.orig_link_settings_stored == FALSE) {
955 hw->mac.orig_autoc = autoc;
956 hw->mac.orig_autoc2 = autoc2;
957 hw->mac.orig_link_settings_stored = TRUE;
959 if (autoc != hw->mac.orig_autoc)
960 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
961 IXGBE_AUTOC_AN_RESTART));
963 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
964 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
965 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
966 autoc2 |= (hw->mac.orig_autoc2 &
967 IXGBE_AUTOC2_UPPER_MASK);
968 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
972 /* Store the permanent mac address */
973 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
976 * Store MAC address from RAR0, clear receive address registers, and
977 * clear the multicast table. Also reset num_rar_entries to 128,
978 * since we modify this value when programming the SAN MAC address.
980 hw->mac.num_rar_entries = 128;
981 hw->mac.ops.init_rx_addrs(hw);
985 /* Store the permanent SAN mac address */
986 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
988 /* Add the SAN MAC address to the RAR only if it's a valid address */
989 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
990 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
991 hw->mac.san_addr, 0, IXGBE_RAH_AV);
993 /* Reserve the last RAR for the SAN MAC address */
994 hw->mac.num_rar_entries--;
/*
 * NOTE(review): listing gaps hide the kernel-doc opener, opening brace,
 * the rar declaration, the middle of the addr_low byte-swap expression
 * (bytes 2 and 3), the break after the highwater scan, the final "else"
 * error path (if any), and "return IXGBE_SUCCESS;"/closing brace. The
 * "if (rar == 0)" guard implied by the trailing comment also sits in a
 * gap before the ixgbe_clear_vmdq call -- verify against upstream.
 */
1002 * ixgbe_insert_mac_addr_82599 - Find a RAR for this mac address
1003 * @hw: pointer to hardware structure
1004 * @addr: Address to put into receive address register
1005 * @vmdq: VMDq pool to assign
1007 * Puts an ethernet address into a receive address register, or
1008 * finds the rar that it is aleady in; adds to the pool list
1010 s32 ixgbe_insert_mac_addr_82599(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1012 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
1013 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
1015 u32 rar_low, rar_high;
1016 u32 addr_low, addr_high;
1018 /* swap bytes for HW little endian */
1019 addr_low = addr[0] | (addr[1] << 8)
1022 addr_high = addr[4] | (addr[5] << 8);
1025 * Either find the mac_id in rar or find the first empty space.
1026 * rar_highwater points to just after the highest currently used
1027 * rar in order to shorten the search. It grows when we add a new
1030 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
1031 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
/* An entry with AV clear is free; remember the first one we see */
1033 if (((IXGBE_RAH_AV & rar_high) == 0)
1034 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
1035 first_empty_rar = rar;
1036 } else if ((rar_high & 0xFFFF) == addr_high) {
1037 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
1038 if (rar_low == addr_low)
1039 break; /* found it already in the rars */
1043 if (rar < hw->mac.rar_highwater) {
1044 /* already there so just add to the pool bits */
1045 ixgbe_set_vmdq(hw, rar, vmdq);
1046 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
1047 /* stick it into first empty RAR slot we found */
1048 rar = first_empty_rar;
1049 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1050 } else if (rar == hw->mac.rar_highwater) {
1051 /* add it to the top of the list and inc the highwater mark */
1052 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1053 hw->mac.rar_highwater++;
1054 } else if (rar >= hw->mac.num_rar_entries) {
1055 return IXGBE_ERR_INVALID_MAC_ADDR;
1059 * If we found rar[0], make sure the default pool bit (we use pool 0)
1060 * remains cleared to be sure default pool packets will get delivered
1063 ixgbe_clear_vmdq(hw, rar, 0);
/*
 * NOTE(review): the kernel-doc opener, opening brace, the "goto done"
 * after the no-pools early-out, the mpsar_lo/mpsar_hi zeroing assignments
 * inside the CLEAR_VMDQ_ALL branch, the "else" selecting the hi-word path
 * for vmdq >= 32, the "done:" label, and closing braces are in listing
 * gaps -- the visible mpsar_lo/mpsar_hi values reaching the "last pool"
 * test depend on those missing assignments.
 */
1069 * ixgbe_clear_vmdq_82599 - Disassociate a VMDq pool index from a rx address
1070 * @hw: pointer to hardware struct
1071 * @rar: receive address register index to disassociate
1072 * @vmdq: VMDq pool index to remove from the rar
1074 s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
1076 u32 mpsar_lo, mpsar_hi;
1077 u32 rar_entries = hw->mac.num_rar_entries;
1079 if (rar < rar_entries) {
1080 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
1081 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
/* Nothing to clear if no pools reference this RAR */
1083 if (!mpsar_lo && !mpsar_hi)
1086 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
1088 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
1092 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
1095 } else if (vmdq < 32) {
1096 mpsar_lo &= ~(1 << vmdq);
1097 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
1099 mpsar_hi &= ~(1 << (vmdq - 32));
1100 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
1103 /* was that the last pool using this rar? */
1104 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
1105 hw->mac.ops.clear_rar(hw, rar);
1107 DEBUGOUT1("RAR index %d is out of range.\n", rar);
1111 return IXGBE_SUCCESS;
/*
 * NOTE(review): the kernel-doc opener, opening brace, the mpsar
 * declaration, the "if (vmdq < 32)" selector, the "mpsar |= 1 << vmdq;"
 * for the low-word path, the "} else {" for vmdq >= 32, and closing braces
 * are in listing gaps -- as shown, the MPSAR_LO write would store the read
 * value unmodified; the set-bit line exists only in the complete file.
 */
1115 * ixgbe_set_vmdq_82599 - Associate a VMDq pool index with a rx address
1116 * @hw: pointer to hardware struct
1117 * @rar: receive address register index to associate with a VMDq index
1118 * @vmdq: VMDq pool index
1120 s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
1123 u32 rar_entries = hw->mac.num_rar_entries;
1125 if (rar < rar_entries) {
1127 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
1129 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
1131 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
1132 mpsar |= 1 << (vmdq - 32);
1133 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
1136 DEBUGOUT1("RAR index %d is out of range.\n", rar);
1138 return IXGBE_SUCCESS;
1142 * ixgbe_set_vfta_82599 - Set VLAN filter table
1143 * @hw: pointer to hardware structure
1144 * @vlan: VLAN id to write to VLAN filter
1145 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
1146 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
1148 * Turn on/off specified VLAN in the VLAN filter table.
1150 s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1156 u32 first_empty_slot;
1160 return IXGBE_ERR_PARAM;
1163 * this is a 2 part operation - first the VFTA, then the
1164 * VLVF and VLVFB if VT Mode is set
1168 * The VFTA is a bitstring made up of 128 32-bit registers
1169 * that enable the particular VLAN id, much like the MTA:
1170 * bits[11-5]: which register
1171 * bits[4-0]: which bit in the register
1173 regindex = (vlan >> 5) & 0x7F;
1174 bitindex = vlan & 0x1F;
1175 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1177 bits |= (1 << bitindex);
1179 bits &= ~(1 << bitindex);
1180 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1186 * make sure the vlan is in VLVF
1187 * set the vind bit in the matching VLVFB
1189 * clear the pool bit and possibly the vind
1191 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
1192 if (vt & IXGBE_VT_CTL_VT_ENABLE) {
1193 /* find the vlanid or the first empty slot */
1194 first_empty_slot = 0;
1196 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
1197 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
1198 if (!bits && !first_empty_slot)
1199 first_empty_slot = regindex;
1200 else if ((bits & 0x0FFF) == vlan)
1204 if (regindex >= IXGBE_VLVF_ENTRIES) {
1205 if (first_empty_slot)
1206 regindex = first_empty_slot;
1208 DEBUGOUT("No space in VLVF.\n");
1215 /* set the pool bit */
1217 bits = IXGBE_READ_REG(hw,
1218 IXGBE_VLVFB(regindex*2));
1219 bits |= (1 << vind);
1221 IXGBE_VLVFB(regindex*2),
1224 bits = IXGBE_READ_REG(hw,
1225 IXGBE_VLVFB((regindex*2)+1));
1226 bits |= (1 << vind);
1228 IXGBE_VLVFB((regindex*2)+1),
1232 /* clear the pool bit */
1234 bits = IXGBE_READ_REG(hw,
1235 IXGBE_VLVFB(regindex*2));
1236 bits &= ~(1 << vind);
1238 IXGBE_VLVFB(regindex*2),
1240 bits |= IXGBE_READ_REG(hw,
1241 IXGBE_VLVFB((regindex*2)+1));
1243 bits = IXGBE_READ_REG(hw,
1244 IXGBE_VLVFB((regindex*2)+1));
1245 bits &= ~(1 << vind);
1247 IXGBE_VLVFB((regindex*2)+1),
1249 bits |= IXGBE_READ_REG(hw,
1250 IXGBE_VLVFB(regindex*2));
1255 IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
1256 (IXGBE_VLVF_VIEN | vlan));
1258 IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
1261 return IXGBE_SUCCESS;
1265 * ixgbe_clear_vfta_82599 - Clear VLAN filter table
1266 * @hw: pointer to hardware structure
1268 * Clears the VLAN filer table, and the VMDq index associated with the filter
1270 s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
1274 for (offset = 0; offset < hw->mac.vft_size; offset++)
1275 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1277 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
1278 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
1279 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
1280 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
1283 return IXGBE_SUCCESS;
1287 * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
1288 * @hw: pointer to hardware structure
1290 s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
1293 DEBUGOUT(" Clearing UTA\n");
1295 for (i = 0; i < 128; i++)
1296 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
1298 return IXGBE_SUCCESS;
1302 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1303 * @hw: pointer to hardware structure
/* NOTE(review): returns IXGBE_ERR_FDIR_REINIT_FAILED when either poll loop
 * below times out, IXGBE_SUCCESS otherwise. */
1305 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
/* Snapshot FDIRCTRL with INIT_DONE cleared; re-written below to restart
 * the table-init state machine with the same configuration. */
1308 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1309 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1312 * Before starting reinitialization process,
1313 * FDIRCMD.CMD must be zero.
1315 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1316 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1317 IXGBE_FDIRCMD_CMD_MASK))
1321 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1322 DEBUGOUT("Flow Director previous command isn't complete, "
1323 "aborting table re-initialization. \n");
1324 return IXGBE_ERR_FDIR_REINIT_FAILED;
/* Reset the filter free-list bookkeeping before restarting init. */
1327 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1328 IXGBE_WRITE_FLUSH(hw);
1330 * 82599 adapters flow director init flow cannot be restarted,
1331 * Workaround 82599 silicon errata by performing the following steps
1332 * before re-writing the FDIRCTRL control register with the same value.
1333 * - write 1 to bit 8 of FDIRCMD register &
1334 * - write 0 to bit 8 of FDIRCMD register
/* The explicit flushes between these writes are required: the errata
 * workaround depends on the CLEARHT pulse reaching hardware in order. */
1336 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1337 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1338 IXGBE_FDIRCMD_CLEARHT));
1339 IXGBE_WRITE_FLUSH(hw);
1340 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1341 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1342 ~IXGBE_FDIRCMD_CLEARHT));
1343 IXGBE_WRITE_FLUSH(hw);
1345 * Clear FDIR Hash register to clear any leftover hashes
1346 * waiting to be programmed.
1348 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1349 IXGBE_WRITE_FLUSH(hw);
/* Re-arm table init by writing back FDIRCTRL (INIT_DONE cleared above). */
1351 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1352 IXGBE_WRITE_FLUSH(hw);
1354 /* Poll init-done after we write FDIRCTRL register */
1355 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1356 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1357 IXGBE_FDIRCTRL_INIT_DONE)
1361 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1362 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1363 return IXGBE_ERR_FDIR_REINIT_FAILED;
1366 /* Clear FDIR statistics registers (read to clear) */
1367 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1368 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1369 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1370 IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1371 IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1373 return IXGBE_SUCCESS;
1377 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1378 * @hw: pointer to hardware structure
1379 * @pballoc: which mode to allocate filters with
/* NOTE(review): pballoc is assumed to be one of the IXGBE_FDIR_PBALLOC_*
 * encodings; anything else falls through the switch below and returns
 * IXGBE_ERR_CONFIG (after the RXPBSIZE writes have already happened). */
1381 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1388 * Before enabling Flow Director, the Rx Packet Buffer size
1389 * must be reduced. The new value is the current size minus
1390 * flow director memory usage size.
/* NOTE(review): assumes RXPBSIZE(0) is currently larger than pbsize;
 * otherwise the subtraction underflows - confirm against init ordering. */
1392 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1393 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1394 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1397 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1398 * intialized to zero for non DCB mode otherwise actual total RX PB
1399 * would be bigger than programmed and filter space would run into
1402 for (i = 1; i < 8; i++)
1403 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1405 /* Send interrupt when 64 filters are left */
1406 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1408 /* Set the maximum length per hash bucket to 0xA filters */
1409 fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
1412 case IXGBE_FDIR_PBALLOC_64K:
1413 /* 8k - 1 signature filters */
1414 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1416 case IXGBE_FDIR_PBALLOC_128K:
1417 /* 16k - 1 signature filters */
1418 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1420 case IXGBE_FDIR_PBALLOC_256K:
1421 /* 32k - 1 signature filters */
1422 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1426 return IXGBE_ERR_CONFIG;
1429 /* Move the flexible bytes to use the ethertype - shift 6 words */
1430 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1433 /* Prime the keys for hashing */
1434 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1435 IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
1436 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1437 IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
1440 * Poll init-done after we write the register. Estimated times:
1441 * 10G: PBALLOC = 11b, timing is 60us
1442 * 1G: PBALLOC = 11b, timing is 600us
1443 * 100M: PBALLOC = 11b, timing is 6ms
1445 * Multiple these timings by 4 if under full Rx load
1447 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1448 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1449 * this might not finish in our poll time, but we can live with that
1452 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1453 IXGBE_WRITE_FLUSH(hw);
1454 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1455 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1456 IXGBE_FDIRCTRL_INIT_DONE)
/* Timeout is deliberately non-fatal here: only logged, then SUCCESS. */
1460 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1461 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1463 return IXGBE_SUCCESS;
1467 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1468 * @hw: pointer to hardware structure
1469 * @pballoc: which mode to allocate filters with
/* NOTE(review): same contract as the signature-mode initializer: invalid
 * pballoc returns IXGBE_ERR_CONFIG; poll timeout is only logged. */
1471 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1478 * Before enabling Flow Director, the Rx Packet Buffer size
1479 * must be reduced. The new value is the current size minus
1480 * flow director memory usage size.
/* NOTE(review): assumes RXPBSIZE(0) currently exceeds pbsize - confirm. */
1483 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1484 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1485 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1488 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1489 * intialized to zero for non DCB mode otherwise actual total RX PB
1490 * would be bigger than programmed and filter space would run into
1493 for (i = 1; i < 8; i++)
1494 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1496 /* Send interrupt when 64 filters are left */
1497 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
/* Perfect mode holds 1/4 as many filters per pb size as signature mode. */
1500 case IXGBE_FDIR_PBALLOC_64K:
1501 /* 2k - 1 perfect filters */
1502 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1504 case IXGBE_FDIR_PBALLOC_128K:
1505 /* 4k - 1 perfect filters */
1506 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1508 case IXGBE_FDIR_PBALLOC_256K:
1509 /* 8k - 1 perfect filters */
1510 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1514 return IXGBE_ERR_CONFIG;
1517 /* Turn perfect match filtering on */
1518 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
1519 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1521 /* Move the flexible bytes to use the ethertype - shift 6 words */
1522 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1524 /* Prime the keys for hashing */
1525 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1526 IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
1527 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1528 IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
1531 * Poll init-done after we write the register. Estimated times:
1532 * 10G: PBALLOC = 11b, timing is 60us
1533 * 1G: PBALLOC = 11b, timing is 600us
1534 * 100M: PBALLOC = 11b, timing is 6ms
1536 * Multiple these timings by 4 if under full Rx load
1538 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1539 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1540 * this might not finish in our poll time, but we can live with that
1544 /* Set the maximum length per hash bucket to 0xA filters */
1545 fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
1547 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1548 IXGBE_WRITE_FLUSH(hw);
1549 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1550 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1551 IXGBE_FDIRCTRL_INIT_DONE)
1555 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1556 DEBUGOUT("Flow Director Perfect poll time exceeded!\n");
1558 return IXGBE_SUCCESS;
1563 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
1564 * @stream: input bitstream to compute the hash on
1565 * @key: 32-bit hash key
/* NOTE(review): bit-serial software implementation of the hardware ATR
 * hash; returns a 16-bit hash of the 336-bit input stream under @key. */
1567 u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
1570 * The algorithm is as follows:
1571 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
1572 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
1573 * and A[n] x B[n] is bitwise AND between same length strings
1575 * K[n] is 16 bits, defined as:
1576 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
1577 * for n modulo 32 < 15, K[n] =
1578 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
1580 * S[n] is 16 bits, defined as:
1581 * for n >= 15, S[n] = S[n:n - 15]
1582 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
1584 * To simplify for programming, the algorithm is implemented
1585 * in software this way:
1587 * Key[31:0], Stream[335:0]
1589 * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
1590 * int_key[350:0] = tmp_key[351:1]
1591 * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
1594 * for (i = 0; i < 351; i++) {
1596 * hash ^= int_stream[(i + 15):i];
/* NOTE(review): treating atr_input as raw bytes assumes byte_stream is
 * the first member and the struct has no leading padding - confirm
 * against the struct ixgbe_atr_input definition. */
1606 u8 *stream = (u8 *)atr_input;
1607 u8 int_key[44]; /* upper-most bit unused */
1608 u8 hash_str[46]; /* upper-most 2 bits unused */
1609 u16 hash_result = 0;
1613 * Initialize the fill member to prevent warnings
1616 tmp_key.fill[0] = 0;
1618 /* First load the temporary key stream */
1619 for (i = 0; i < 6; i++) {
1620 u64 fillkey = ((u64)key << 32) | key;
1621 tmp_key.fill[i] = fillkey;
1625 * Set the interim key for the hashing. Bit 352 is unused, so we must
1626 * shift and compensate when building the key.
1629 int_key[0] = tmp_key.key_stream[0] >> 1;
1630 for (i = 1, j = 0; i < 44; i++) {
1631 unsigned int this_key = tmp_key.key_stream[j] << 7;
1633 int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
1637 * Set the interim bit string for the hashing. Bits 368 and 367 are
1638 * unused, so shift and compensate when building the string.
1640 hash_str[0] = (stream[40] & 0x7f) >> 1;
1641 for (i = 1, j = 40; i < 46; i++) {
1642 unsigned int this_str = stream[j] << 7;
1646 hash_str[i] = (u8)(this_str | (stream[j] >> 1));
1650 * Now compute the hash. i is the index into hash_str, j is into our
1651 * key stream, k is counting the number of bits, and h interates within
1654 for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
1655 for (h = 0; h < 8 && k < 351; h++, k++) {
1656 if (int_key[j] & (1 << h)) {
1658 * Key bit is set, XOR in the current 16-bit
1659 * string. Example of processing:
1661 * tmp = (hash_str[i - 2] & 0 << 16) |
1662 * (hash_str[i - 1] & 0xff << 8) |
1663 * (hash_str[i] & 0xff >> 0)
1664 * So tmp = hash_str[15 + k:k], since the
1665 * i + 2 clause rolls off the 16-bit value
1667 * tmp = (hash_str[i - 2] & 0x7f << 9) |
1668 * (hash_str[i - 1] & 0xff << 1) |
1669 * (hash_str[i] & 0x80 >> 7)
1671 int tmp = (hash_str[i] >> h);
1672 tmp |= (hash_str[i - 1] << (8 - h));
1673 tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
1675 hash_result ^= (u16)tmp;
1684 * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
1685 * @input: input stream to modify
1686 * @vlan: the VLAN id to load
1688 s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
1690 input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
1691 input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
1693 return IXGBE_SUCCESS;
1697 * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
1698 * @input: input stream to modify
1699 * @src_addr: the IP address to load
1701 s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
1703 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
1704 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
1705 (src_addr >> 16) & 0xff;
1706 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
1707 (src_addr >> 8) & 0xff;
1708 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
1710 return IXGBE_SUCCESS;
1714 * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
1715 * @input: input stream to modify
1716 * @dst_addr: the IP address to load
1718 s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
1720 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
1721 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
1722 (dst_addr >> 16) & 0xff;
1723 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
1724 (dst_addr >> 8) & 0xff;
1725 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
1727 return IXGBE_SUCCESS;
1731 * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
1732 * @input: input stream to modify
1733 * @src_addr_1: the first 4 bytes of the IP address to load
1734 * @src_addr_2: the second 4 bytes of the IP address to load
1735 * @src_addr_3: the third 4 bytes of the IP address to load
1736 * @src_addr_4: the fourth 4 bytes of the IP address to load
1738 s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
1739 u32 src_addr_1, u32 src_addr_2,
1740 u32 src_addr_3, u32 src_addr_4)
1742 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
1743 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
1744 (src_addr_4 >> 8) & 0xff;
1745 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
1746 (src_addr_4 >> 16) & 0xff;
1747 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
1749 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
1750 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
1751 (src_addr_3 >> 8) & 0xff;
1752 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
1753 (src_addr_3 >> 16) & 0xff;
1754 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
1756 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
1757 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
1758 (src_addr_2 >> 8) & 0xff;
1759 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
1760 (src_addr_2 >> 16) & 0xff;
1761 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
1763 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
1764 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
1765 (src_addr_1 >> 8) & 0xff;
1766 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
1767 (src_addr_1 >> 16) & 0xff;
1768 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
1770 return IXGBE_SUCCESS;
1774 * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
1775 * @input: input stream to modify
1776 * @dst_addr_1: the first 4 bytes of the IP address to load
1777 * @dst_addr_2: the second 4 bytes of the IP address to load
1778 * @dst_addr_3: the third 4 bytes of the IP address to load
1779 * @dst_addr_4: the fourth 4 bytes of the IP address to load
1781 s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
1782 u32 dst_addr_1, u32 dst_addr_2,
1783 u32 dst_addr_3, u32 dst_addr_4)
1785 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
1786 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
1787 (dst_addr_4 >> 8) & 0xff;
1788 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
1789 (dst_addr_4 >> 16) & 0xff;
1790 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
1792 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
1793 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
1794 (dst_addr_3 >> 8) & 0xff;
1795 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
1796 (dst_addr_3 >> 16) & 0xff;
1797 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
1799 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
1800 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
1801 (dst_addr_2 >> 8) & 0xff;
1802 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
1803 (dst_addr_2 >> 16) & 0xff;
1804 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
1806 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
1807 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
1808 (dst_addr_1 >> 8) & 0xff;
1809 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
1810 (dst_addr_1 >> 16) & 0xff;
1811 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
1813 return IXGBE_SUCCESS;
1817 * ixgbe_atr_set_src_port_82599 - Sets the source port
1818 * @input: input stream to modify
1819 * @src_port: the source port to load
1821 s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
1823 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
1824 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
1826 return IXGBE_SUCCESS;
1830 * ixgbe_atr_set_dst_port_82599 - Sets the destination port
1831 * @input: input stream to modify
1832 * @dst_port: the destination port to load
1834 s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
1836 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
1837 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
1839 return IXGBE_SUCCESS;
1843 * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
1844 * @input: input stream to modify
1845 * @flex_bytes: the flexible bytes to load
1847 s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
1849 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
1850 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
1852 return IXGBE_SUCCESS;
1856 * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
1857 * @input: input stream to modify
1858 * @vm_pool: the Virtual Machine pool to load
1860 s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
1862 input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
1864 return IXGBE_SUCCESS;
1868 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
1869 * @input: input stream to modify
1870 * @l4type: the layer 4 type value to load
1872 s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
1874 input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
1876 return IXGBE_SUCCESS;
1880 * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
1881 * @input: input stream to search
1882 * @vlan: the VLAN id to load
1884 s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
1886 *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
1887 *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
1889 return IXGBE_SUCCESS;
1893 * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
1894 * @input: input stream to search
1895 * @src_addr: the IP address to load
1897 s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
1899 *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
1900 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
1901 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
1902 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
1904 return IXGBE_SUCCESS;
1908 * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
1909 * @input: input stream to search
1910 * @dst_addr: the IP address to load
1912 s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr)
1914 *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
1915 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
1916 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
1917 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
1919 return IXGBE_SUCCESS;
1923 * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
1924 * @input: input stream to search
1925 * @src_addr_1: the first 4 bytes of the IP address to load
1926 * @src_addr_2: the second 4 bytes of the IP address to load
1927 * @src_addr_3: the third 4 bytes of the IP address to load
1928 * @src_addr_4: the fourth 4 bytes of the IP address to load
1930 s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
1931 u32 *src_addr_1, u32 *src_addr_2,
1932 u32 *src_addr_3, u32 *src_addr_4)
1934 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
1935 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
1936 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
1937 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
1939 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
1940 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
1941 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
1942 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
1944 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
1945 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
1946 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
1947 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
1949 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
1950 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
1951 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
1952 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
1954 return IXGBE_SUCCESS;
1958 * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
1959 * @input: input stream to search
1960 * @dst_addr_1: the first 4 bytes of the IP address to load
1961 * @dst_addr_2: the second 4 bytes of the IP address to load
1962 * @dst_addr_3: the third 4 bytes of the IP address to load
1963 * @dst_addr_4: the fourth 4 bytes of the IP address to load
1965 s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
1966 u32 *dst_addr_1, u32 *dst_addr_2,
1967 u32 *dst_addr_3, u32 *dst_addr_4)
1969 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
1970 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
1971 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
1972 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
1974 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
1975 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
1976 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
1977 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
1979 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
1980 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
1981 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
1982 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
1984 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
1985 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
1986 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
1987 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
1989 return IXGBE_SUCCESS;
1993 * ixgbe_atr_get_src_port_82599 - Gets the source port
1994 * @input: input stream to modify
1995 * @src_port: the source port to load
1997 * Even though the input is given in big-endian, the FDIRPORT registers
1998 * expect the ports to be programmed in little-endian. Hence the need to swap
1999 * endianness when retrieving the data. This can be confusing since the
2000 * internal hash engine expects it to be big-endian.
2002 s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port)
2004 *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
2005 *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
2007 return IXGBE_SUCCESS;
2011 * ixgbe_atr_get_dst_port_82599 - Gets the destination port
2012 * @input: input stream to modify
2013 * @dst_port: the destination port to load
2015 * Even though the input is given in big-endian, the FDIRPORT registers
2016 * expect the ports to be programmed in little-endian. Hence the need to swap
2017 * endianness when retrieving the data. This can be confusing since the
2018 * internal hash engine expects it to be big-endian.
2020 s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port)
2022 *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
2023 *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
2025 return IXGBE_SUCCESS;
2029 * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
2030 * @input: input stream to modify
2031 * @flex_bytes: the flexible bytes to load
2033 s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte)
2035 *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
2036 *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
2038 return IXGBE_SUCCESS;
2042 * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
2043 * @input: input stream to modify
2044 * @vm_pool: the Virtual Machine pool to load
2046 s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool)
2048 *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
2050 return IXGBE_SUCCESS;
2054 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
2055 * @input: input stream to modify
2056 * @l4type: the layer 4 type value to load
2058 s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
2060 *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
2062 return IXGBE_SUCCESS;
2066 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
2067 * @hw: pointer to hardware structure
2068 * @stream: input bitstream
2069 * @queue: queue index to direct traffic to
/* NOTE(review): computes bucket and signature hashes from @input and
 * programs them with a single 64-bit FDIRHASH/FDIRCMD write; returns
 * IXGBE_ERR_CONFIG for an unrecognized L4 type. */
2071 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
2072 struct ixgbe_atr_input *input,
2078 u16 bucket_hash, sig_hash;
2081 bucket_hash = ixgbe_atr_compute_hash_82599(input,
2082 IXGBE_ATR_BUCKET_HASH_KEY);
2084 /* bucket_hash is only 15 bits */
2085 bucket_hash &= IXGBE_ATR_HASH_MASK;
2087 sig_hash = ixgbe_atr_compute_hash_82599(input,
2088 IXGBE_ATR_SIGNATURE_HASH_KEY);
2090 /* Get the l4type in order to program FDIRCMD properly */
2091 /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
2092 ixgbe_atr_get_l4type_82599(input, &l4type);
2095 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
2096 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
2098 fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
2100 fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
2101 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
2103 switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
2104 case IXGBE_ATR_L4TYPE_TCP:
2105 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
2107 case IXGBE_ATR_L4TYPE_UDP:
2108 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
2110 case IXGBE_ATR_L4TYPE_SCTP:
2111 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
/* Unknown L4 type: bail out before touching hardware. */
2114 DEBUGOUT(" Error on l4type input\n");
2115 return IXGBE_ERR_CONFIG;
2118 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
2119 fdircmd |= IXGBE_FDIRCMD_IPV6;
2121 fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
2122 fdirhashcmd = ((fdircmd << 32) | fdirhash);
2124 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF);
/* Single atomic 64-bit write programs FDIRHASH (low) and FDIRCMD (high). */
2125 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
2127 return IXGBE_SUCCESS;
2131 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
2132 * @hw: pointer to hardware structure
2133 * @input: input bitstream
2134 * @queue: queue index to direct traffic to
2136 * Note that the caller to this function must lock before calling, since the
2137 * hardware writes must be protected from one another.
/* NOTE(review): returns IXGBE_ERR_CONFIG for an unrecognized L4 type
 * (checked before any register write), IXGBE_SUCCESS otherwise. */
2139 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
2140 struct ixgbe_atr_input *input,
2146 u32 src_ipv4, dst_ipv4;
2147 u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
2148 u16 src_port, dst_port, vlan_id, flex_bytes;
2152 /* Get our input values */
2153 ixgbe_atr_get_l4type_82599(input, &l4type);
2156 * Check l4type formatting, and bail out before we touch the hardware
2157 * if there's a configuration issue
2159 switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
2160 case IXGBE_ATR_L4TYPE_TCP:
2161 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
2163 case IXGBE_ATR_L4TYPE_UDP:
2164 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
2166 case IXGBE_ATR_L4TYPE_SCTP:
2167 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
2170 DEBUGOUT(" Error on l4type input\n");
2171 return IXGBE_ERR_CONFIG;
2174 bucket_hash = ixgbe_atr_compute_hash_82599(input,
2175 IXGBE_ATR_BUCKET_HASH_KEY);
2177 /* bucket_hash is only 15 bits */
2178 bucket_hash &= IXGBE_ATR_HASH_MASK;
2180 ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
2181 ixgbe_atr_get_src_port_82599(input, &src_port);
2182 ixgbe_atr_get_dst_port_82599(input, &dst_port);
2183 ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
/* soft_id occupies the signature-hash field for perfect filters. */
2185 fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
2187 /* Now figure out if we're IPv4 or IPv6 */
2188 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
2190 ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
2191 &src_ipv6_3, &src_ipv6_4);
2193 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
2194 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
2195 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
2196 /* The last 4 bytes is the same register as IPv4 */
2197 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
2199 fdircmd |= IXGBE_FDIRCMD_IPV6;
2200 fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
2203 ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
2204 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
2208 ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
2209 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
2211 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
2212 (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)))
2213 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
2214 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
2216 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
2217 fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
2218 fdircmd |= IXGBE_FDIRCMD_LAST;
2219 fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
2220 fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
/* FDIRCMD write must come last - it commits the staged filter fields. */
2222 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
2223 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
2225 return IXGBE_SUCCESS;
2229 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2230 * @hw: pointer to hardware structure
2231 * @reg: analog register to read
2234 * Performs read operation to Omer analog register specified.
2236 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2240 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2242 IXGBE_WRITE_FLUSH(hw);
2244 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2245 *val = (u8)core_ctl;
2247 return IXGBE_SUCCESS;
2251 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2252 * @hw: pointer to hardware structure
2253 * @reg: atlas register to write
2254 * @val: value to write
2256 * Performs write operation to Omer analog register specified.
2258 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2262 core_ctl = (reg << 8) | val;
2263 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2264 IXGBE_WRITE_FLUSH(hw);
2267 return IXGBE_SUCCESS;
2271 * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
2272 * @hw: pointer to hardware structure
2274 * Starts the hardware using the generic start_hw function.
2275 * Then performs revision-specific operations:
2276 * Clears the rate limiter registers.
2278 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
2281 s32 ret_val = IXGBE_SUCCESS;
2283 ret_val = ixgbe_start_hw_generic(hw);
2285 /* Clear the rate limiters */
2286 for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) {
2287 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, q_num);
2288 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
2290 IXGBE_WRITE_FLUSH(hw);
2292 /* We need to run link autotry after the driver loads */
2293 hw->mac.autotry_restart = TRUE;
2295 if (ret_val == IXGBE_SUCCESS)
2296 ret_val = ixgbe_verify_fw_version_82599(hw);
2301 * ixgbe_identify_phy_82599 - Get physical layer module
2302 * @hw: pointer to hardware structure
2304 * Determines the physical layer module found on the current adapter.
2305 * If PHY already detected, maintains current PHY type in hw struct,
2306 * otherwise executes the PHY detection routine.
2308 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2310 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2312 /* Detect PHY if not unknown - returns success if already detected. */
2313 status = ixgbe_identify_phy_generic(hw);
2314 if (status != IXGBE_SUCCESS)
2315 status = ixgbe_identify_sfp_module_generic(hw);
2316 /* Set PHY type none if no PHY detected */
2317 if (hw->phy.type == ixgbe_phy_unknown) {
2318 hw->phy.type = ixgbe_phy_none;
2319 status = IXGBE_SUCCESS;
2322 /* Return error if SFP module has been detected but is not supported */
2323 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2324 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
2330 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2331 * @hw: pointer to hardware structure
2333 * Determines physical layer capabilities of the current configuration.
2335 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2337 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2338 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2339 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2340 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2341 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2342 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2343 u16 ext_ability = 0;
2344 u8 comp_codes_10g = 0;
2346 hw->phy.ops.identify(hw);
2348 if (hw->phy.type == ixgbe_phy_tn ||
2349 hw->phy.type == ixgbe_phy_aq ||
2350 hw->phy.type == ixgbe_phy_cu_unknown) {
2351 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2352 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2353 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2354 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2355 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2356 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2357 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2358 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2362 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2363 case IXGBE_AUTOC_LMS_1G_AN:
2364 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2365 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2366 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2367 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2370 /* SFI mode so read SFP module */
2373 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2374 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2375 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2376 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2377 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2378 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2379 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2382 case IXGBE_AUTOC_LMS_10G_SERIAL:
2383 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2384 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2386 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2389 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2390 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2391 if (autoc & IXGBE_AUTOC_KX_SUPP)
2392 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2393 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2394 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2395 if (autoc & IXGBE_AUTOC_KR_SUPP)
2396 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2405 /* SFP check must be done last since DA modules are sometimes used to
2406 * test KR mode - we need to id KR mode correctly before SFP module.
2407 * Call identify_sfp because the pluggable module may have changed */
2408 hw->phy.ops.identify_sfp(hw);
2409 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2412 switch (hw->phy.type) {
2413 case ixgbe_phy_tw_tyco:
2414 case ixgbe_phy_tw_unknown:
2415 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2417 case ixgbe_phy_sfp_avago:
2418 case ixgbe_phy_sfp_ftl:
2419 case ixgbe_phy_sfp_intel:
2420 case ixgbe_phy_sfp_unknown:
2421 hw->phy.ops.read_i2c_eeprom(hw,
2422 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2423 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2424 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2425 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2426 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2433 return physical_layer;
2437 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2438 * @hw: pointer to hardware structure
2439 * @regval: register value to write to RXCTRL
2441 * Enables the Rx DMA unit for 82599
2443 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2445 #define IXGBE_MAX_SECRX_POLL 30
2450 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2451 * If traffic is incoming before we enable the Rx unit, it could hang
2452 * the Rx DMA unit. Therefore, make sure the security engine is
2453 * completely disabled prior to enabling the Rx unit.
2455 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2456 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2457 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2458 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2459 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2460 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2463 /* Use interrupt-safe sleep just in case */
2467 /* For informational purposes only */
2468 if (i >= IXGBE_MAX_SECRX_POLL)
2469 DEBUGOUT("Rx unit being enabled before security "
2470 "path fully disabled. Continuing with init.\n");
2472 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2473 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2474 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2475 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2476 IXGBE_WRITE_FLUSH(hw);
2478 return IXGBE_SUCCESS;
2482 * ixgbe_get_device_caps_82599 - Get additional device capabilities
2483 * @hw: pointer to hardware structure
2484 * @device_caps: the EEPROM word with the extra device capabilities
2486 * This function will read the EEPROM location for the device capabilities,
2487 * and return the word through device_caps.
2489 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
2491 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
2493 return IXGBE_SUCCESS;
2497 * ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599
2498 * @hw: pointer to hardware structure
2499 * @san_mac_offset: SAN MAC address offset
2501 * This function will read the EEPROM location for the SAN MAC address
2502 * pointer, and returns the value at that location. This is used in both
2503 * get and set mac_addr routines.
2505 s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
2506 u16 *san_mac_offset)
2509 * First read the EEPROM pointer to see if the MAC addresses are
2512 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
2514 return IXGBE_SUCCESS;
2518 * ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599
2519 * @hw: pointer to hardware structure
2520 * @san_mac_addr: SAN MAC address
2522 * Reads the SAN MAC address from the EEPROM, if it's available. This is
2523 * per-port, so set_lan_id() must be called before reading the addresses.
2524 * set_lan_id() is called by identify_sfp(), but this cannot be relied
2525 * upon for non-SFP connections, so we must call it here.
2527 s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
2529 u16 san_mac_data, san_mac_offset;
2533 * First read the EEPROM pointer to see if the MAC addresses are
2534 * available. If they're not, no point in calling set_lan_id() here.
2536 ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
2538 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2540 * No addresses available in this EEPROM. It's not an
2541 * error though, so just wipe the local address and return.
2543 for (i = 0; i < 6; i++)
2544 san_mac_addr[i] = 0xFF;
2546 goto san_mac_addr_out;
2549 /* make sure we know which port we need to program */
2550 hw->mac.ops.set_lan_id(hw);
2551 /* apply the port offset to the address offset */
2552 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2553 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2554 for (i = 0; i < 3; i++) {
2555 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
2556 san_mac_addr[i * 2] = (u8)(san_mac_data);
2557 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2562 return IXGBE_SUCCESS;
2566 * ixgbe_set_san_mac_addr_82599 - Write the SAN MAC address to the EEPROM
2567 * @hw: pointer to hardware structure
2568 * @san_mac_addr: SAN MAC address
2570 * Write a SAN MAC address to the EEPROM.
2572 s32 ixgbe_set_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
2574 s32 status = IXGBE_SUCCESS;
2575 u16 san_mac_data, san_mac_offset;
2578 /* Look for SAN mac address pointer. If not defined, return */
2579 ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
2581 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2582 status = IXGBE_ERR_NO_SAN_ADDR_PTR;
2583 goto san_mac_addr_out;
2586 /* Make sure we know which port we need to write */
2587 hw->mac.ops.set_lan_id(hw);
2588 /* Apply the port offset to the address offset */
2589 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2590 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2592 for (i = 0; i < 3; i++) {
2593 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
2594 san_mac_data |= (u16)(san_mac_addr[i * 2]);
2595 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
2604 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
2605 * @hw: pointer to hardware structure
2607 * Verifies that installed the firmware version is 0.6 or higher
2608 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2610 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2611 * if the FW version is not supported.
2613 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2615 s32 status = IXGBE_ERR_EEPROM_VERSION;
2616 u16 fw_offset, fw_ptp_cfg_offset;
2619 /* firmware check is only necessary for SFI devices */
2620 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2621 status = IXGBE_SUCCESS;
2622 goto fw_version_out;
2625 /* get the offset to the Firmware Module block */
2626 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2628 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2629 goto fw_version_out;
2631 /* get the offset to the Pass Through Patch Configuration block */
2632 hw->eeprom.ops.read(hw, (fw_offset +
2633 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2634 &fw_ptp_cfg_offset);
2636 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2637 goto fw_version_out;
2639 /* get the firmware version */
2640 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2641 IXGBE_FW_PATCH_VERSION_4),
2644 if (fw_version > 0x5)
2645 status = IXGBE_SUCCESS;