1 /******************************************************************************
3 Copyright (c) 2001-2009, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #include "ixgbe_type.h"
36 #include "ixgbe_api.h"
37 #include "ixgbe_common.h"
38 #include "ixgbe_phy.h"
/*
 * Forward declarations of the 82599-specific implementations defined in
 * this file.
 *
 * NOTE(review): this listing has interior lines elided (the embedded line
 * numbers jump), and several prototypes are visibly truncated — e.g. the
 * boolean/negotiation output parameter of ixgbe_get_link_capabilities_82599
 * and the offset argument of ixgbe_get_san_mac_addr_offset_82599 are
 * missing. Confirm all signatures against the complete source.
 */
40 u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw);
41 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
42 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
43 ixgbe_link_speed *speed,
45 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
46 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw);
47 s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
48 ixgbe_link_speed speed, bool autoneg,
49 bool autoneg_wait_to_complete);
50 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw);
51 s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw,
52 ixgbe_link_speed *speed,
53 bool *link_up, bool link_up_wait_to_complete);
54 s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
55 ixgbe_link_speed speed,
57 bool autoneg_wait_to_complete);
58 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw);
59 static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw,
60 ixgbe_link_speed speed,
62 bool autoneg_wait_to_complete);
63 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
64 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
65 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
66 s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
67 s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
68 s32 ixgbe_insert_mac_addr_82599(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
69 s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan,
70 u32 vind, bool vlan_on);
71 s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw);
72 s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw);
73 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
74 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
75 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
76 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
77 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
78 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
79 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
80 s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
82 s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr);
83 s32 ixgbe_set_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr);
84 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps);
/*
 * ixgbe_init_mac_link_ops_82599 - Init 82599 MAC link function pointers
 * @hw: pointer to hardware structure
 *
 * Selects the setup_link / setup_link_speed handlers: the multi-speed
 * fiber variants when a dual-speed SFP+ module was detected
 * (hw->phy.multispeed_fiber), otherwise the plain 82599 MAC routines.
 *
 * NOTE(review): lines are elided in this listing — both
 * "mac->ops.setup_link =" assignments and the else branch header are
 * truncated. Verify against the complete source.
 */
86 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
88 struct ixgbe_mac_info *mac = &hw->mac;
90 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
92 if (hw->phy.multispeed_fiber) {
93 /* Set up dual speed SFP+ support */
95 &ixgbe_setup_mac_link_multispeed_fiber;
96 mac->ops.setup_link_speed =
97 &ixgbe_setup_mac_link_speed_multispeed_fiber;
100 &ixgbe_setup_mac_link_82599;
101 mac->ops.setup_link_speed =
102 &ixgbe_setup_mac_link_speed_82599;
107 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
108 * @hw: pointer to hardware structure
110 * Initialize any function pointers that were not able to be
111 * set during init_shared_code because the PHY/SFP type was
112 * not known. Perform the SFP init if necessary.
115 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
117 struct ixgbe_mac_info *mac = &hw->mac;
118 struct ixgbe_phy_info *phy = &hw->phy;
119 s32 ret_val = IXGBE_SUCCESS;
121 DEBUGFUNC("ixgbe_init_phy_ops_82599");
123 /* Identify the PHY or SFP module */
124 ret_val = phy->ops.identify(hw);
125 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
126 goto init_phy_ops_out;
128 /* Setup function pointers based on detected SFP module and speeds */
129 ixgbe_init_mac_link_ops_82599(hw);
/* A recognized SFP type needs no separate PHY reset hook */
130 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
131 hw->phy.ops.reset = NULL;
133 /* If copper media, overwrite with copper function pointers */
134 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
135 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
136 mac->ops.setup_link_speed =
137 &ixgbe_setup_copper_link_speed_82599;
138 mac->ops.get_link_capabilities =
139 &ixgbe_get_copper_link_capabilities_generic;
142 /* Set necessary function pointers based on phy type */
143 switch (hw->phy.type) {
/* NOTE(review): the case labels (presumably ixgbe_phy_tn /
 * ixgbe_phy_aq), break statements, and the function tail are elided
 * in this listing — confirm against the complete source. */
145 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
146 phy->ops.get_firmware_version =
147 &ixgbe_get_phy_firmware_version_tnx;
150 phy->ops.get_firmware_version =
151 &ixgbe_get_phy_firmware_version_aq;
/*
 * ixgbe_setup_sfp_modules_82599 - Setup SFP module
 * @hw: pointer to hardware structure
 *
 * For a recognized SFP module, re-inits the MAC link ops, then plays the
 * EEPROM-resident SFP init sequence into the CORECTL register (one word
 * at a time until the 0xffff terminator) and restarts the DSP with two
 * fixed CORECTL writes.
 *
 * NOTE(review): lines are elided here (the data_offset argument of
 * ixgbe_get_sfp_init_sequence_offsets, loop braces, and the return path
 * are truncated) — verify against the complete source.
 */
160 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
162 s32 ret_val = IXGBE_SUCCESS;
163 u16 list_offset, data_offset, data_value;
165 DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
167 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
168 ixgbe_init_mac_link_ops_82599(hw);
170 hw->phy.ops.reset = NULL;
172 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
174 if (ret_val != IXGBE_SUCCESS)
177 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
/* Stream init words to CORECTL until the 0xffff end marker */
178 while (data_value != 0xffff) {
179 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
180 IXGBE_WRITE_FLUSH(hw);
181 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
183 /* Now restart DSP */
184 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000102);
185 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000b1d);
186 IXGBE_WRITE_FLUSH(hw);
194 * ixgbe_get_pcie_msix_count_82599 - Gets MSI-X vector count
195 * @hw: pointer to hardware structure
197 * Read PCIe configuration space, and get the MSI-X vector count from
198 * the capabilities table.
200 u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
/* NOTE(review): the msix_count declaration, the non-PCIe branch, and the
 * zero-based +1 adjustment / return are elided in this listing. */
204 if (hw->mac.msix_vectors_from_pcie) {
/* Table-size field of the MSI-X capability holds (count - 1) */
205 msix_count = IXGBE_READ_PCIE_WORD(hw,
206 IXGBE_PCIE_MSIX_82599_CAPS);
207 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
209 /* MSI-X count is zero-based in HW, so increment to give
218 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
219 * @hw: pointer to hardware structure
221 * Initialize the function pointers and assign the MAC type for 82599.
222 * Does not touch the hardware.
225 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
227 struct ixgbe_mac_info *mac = &hw->mac;
228 struct ixgbe_phy_info *phy = &hw->phy;
/* Generic PHY/MAC init first; 82599-specific pointers overwrite below.
 * NOTE(review): ret_val declaration and the final return are elided in
 * this listing; the second ret_val assignment discards the first. */
231 ret_val = ixgbe_init_phy_ops_generic(hw);
232 ret_val = ixgbe_init_ops_generic(hw);
235 phy->ops.identify = &ixgbe_identify_phy_82599;
236 phy->ops.init = &ixgbe_init_phy_ops_82599;
239 mac->ops.reset_hw = &ixgbe_reset_hw_82599;
240 mac->ops.get_media_type = &ixgbe_get_media_type_82599;
241 mac->ops.get_supported_physical_layer =
242 &ixgbe_get_supported_physical_layer_82599;
243 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
244 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
245 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
246 mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
247 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_82599;
248 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_82599;
249 mac->ops.get_device_caps = &ixgbe_get_device_caps_82599;
251 /* RAR, Multicast, VLAN */
252 mac->ops.set_vmdq = &ixgbe_set_vmdq_82599;
253 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82599;
254 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_82599;
/* Start the used-RAR watermark just past RAR0 (the MAC address) */
255 mac->rar_highwater = 1;
256 mac->ops.set_vfta = &ixgbe_set_vfta_82599;
257 mac->ops.clear_vfta = &ixgbe_clear_vfta_82599;
258 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_82599;
259 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
262 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
263 mac->ops.check_link = &ixgbe_check_mac_link_82599;
264 ixgbe_init_mac_link_ops_82599(hw);
/* 82599 capacities: 128-entry MTA, RAR, and 128 Tx/Rx queues */
266 mac->mcft_size = 128;
268 mac->num_rar_entries = 128;
269 mac->max_tx_queues = 128;
270 mac->max_rx_queues = 128;
271 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
277 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
278 * @hw: pointer to hardware structure
279 * @speed: pointer to link speed
280 * @negotiation: TRUE when autoneg or autotry is enabled
282 * Determines the link capabilities by reading the AUTOC register.
284 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
285 ixgbe_link_speed *speed,
/* NOTE(review): the *negotiation parameter, the autoc declaration, break
 * statements, several *negotiation assignments, and the return are elided
 * in this listing — confirm against the complete source. */
288 s32 status = IXGBE_SUCCESS;
292 * Determine link capabilities based on the stored value of AUTOC,
293 * which represents EEPROM defaults. If AUTOC value has not
294 * been stored, use the current register values.
296 if (hw->mac.orig_link_settings_stored)
297 autoc = hw->mac.orig_autoc;
299 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
/* Decode the Link Mode Select field into supported speeds */
301 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
302 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
303 *speed = IXGBE_LINK_SPEED_1GB_FULL;
304 *negotiation = FALSE;
307 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
308 *speed = IXGBE_LINK_SPEED_10GB_FULL;
309 *negotiation = FALSE;
312 case IXGBE_AUTOC_LMS_1G_AN:
313 *speed = IXGBE_LINK_SPEED_1GB_FULL;
317 case IXGBE_AUTOC_LMS_10G_SERIAL:
318 *speed = IXGBE_LINK_SPEED_10GB_FULL;
319 *negotiation = FALSE;
322 case IXGBE_AUTOC_LMS_KX4_KX_KR:
323 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
/* Backplane: accumulate speeds from the KR/KX4/KX support bits */
324 *speed = IXGBE_LINK_SPEED_UNKNOWN;
325 if (autoc & IXGBE_AUTOC_KR_SUPP)
326 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
327 if (autoc & IXGBE_AUTOC_KX4_SUPP)
328 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
329 if (autoc & IXGBE_AUTOC_KX_SUPP)
330 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
334 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
335 *speed = IXGBE_LINK_SPEED_100_FULL;
336 if (autoc & IXGBE_AUTOC_KR_SUPP)
337 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
338 if (autoc & IXGBE_AUTOC_KX4_SUPP)
339 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
340 if (autoc & IXGBE_AUTOC_KX_SUPP)
341 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
345 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
346 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
347 *negotiation = FALSE;
351 status = IXGBE_ERR_LINK_SETUP;
/* Dual-speed SFP+ modules always allow both 10G and 1G attempts */
356 if (hw->phy.multispeed_fiber) {
357 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
358 IXGBE_LINK_SPEED_1GB_FULL;
367 * ixgbe_get_media_type_82599 - Get media type
368 * @hw: pointer to hardware structure
370 * Returns the media type (fiber, copper, backplane)
372 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
374 enum ixgbe_media_type media_type;
376 /* Detect if there is a copper PHY attached. */
377 if (hw->phy.type == ixgbe_phy_cu_unknown ||
378 hw->phy.type == ixgbe_phy_tn ||
379 hw->phy.type == ixgbe_phy_aq) {
380 media_type = ixgbe_media_type_copper;
/* Otherwise derive the media type from the PCI device ID.
 * NOTE(review): the goto/break statements between cases and the return
 * are elided in this listing — confirm against the complete source. */
384 switch (hw->device_id) {
385 case IXGBE_DEV_ID_82599_KX4:
386 /* Default device ID is mezzanine card KX/KX4 */
387 media_type = ixgbe_media_type_backplane;
389 case IXGBE_DEV_ID_82599_SFP:
390 media_type = ixgbe_media_type_fiber;
392 case IXGBE_DEV_ID_82599_CX4:
393 media_type = ixgbe_media_type_fiber;
396 media_type = ixgbe_media_type_unknown;
404 * ixgbe_setup_mac_link_82599 - Setup MAC link settings
405 * @hw: pointer to hardware structure
407 * Configures link settings based on values in the ixgbe_hw struct.
408 * Restarts the link. Performs autonegotiation if needed.
410 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw)
/* NOTE(review): autoc_reg/links_reg/i declarations, loop delay, closing
 * braces, and the final return are elided in this listing. */
415 s32 status = IXGBE_SUCCESS;
/* Kick a (re)negotiation by setting AN_RESTART in AUTOC */
418 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
419 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
420 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
422 /* Only poll for autoneg to complete if specified to do so */
423 if (hw->phy.autoneg_wait_to_complete) {
/* Polling only makes sense in the backplane KX/KX4/KR link modes */
424 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
425 IXGBE_AUTOC_LMS_KX4_KX_KR ||
426 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
427 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN
428 || (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
429 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
430 links_reg = 0; /* Just in case Autoneg time = 0 */
431 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
432 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
433 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
437 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
438 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
439 DEBUGOUT("Autoneg did not complete.\n");
444 /* Add delay to filter out noises during initial link setup */
451 * ixgbe_setup_mac_link_multispeed_fiber - Setup MAC link settings
452 * @hw: pointer to hardware structure
454 * Configures link settings based on values in the ixgbe_hw struct.
455 * Restarts the link for multi-speed fiber at 1G speed, if link
457 * Performs autonegotiation if needed.
459 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw)
461 s32 status = IXGBE_SUCCESS;
462 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_82599_AUTONEG;
463 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
/* Delegate to the speed-aware variant with autoneg enabled and waiting
 * for completion. Fixed: the second boolean was lowercase "true",
 * inconsistent with the TRUE/FALSE macros used throughout this file. */
465 status = ixgbe_setup_mac_link_speed_multispeed_fiber(hw,
466 link_speed, TRUE, TRUE);
471 * ixgbe_setup_mac_link_speed_multispeed_fiber - Set MAC link speed
472 * @hw: pointer to hardware structure
473 * @speed: new link speed
474 * @autoneg: TRUE if autonegotiation enabled
475 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
477 * Set the link speed in the AUTOC register and restarts link.
479 s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
480 ixgbe_link_speed speed, bool autoneg,
481 bool autoneg_wait_to_complete)
483 s32 status = IXGBE_SUCCESS;
484 ixgbe_link_speed link_speed;
485 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
487 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
488 bool link_up = FALSE;
/* NOTE(review): the negotiation variable declaration, "speed &=
 * link_speed" masking, delays between attempts, goto targets/labels, and
 * the final return are elided in this listing — confirm against the
 * complete source. */
491 /* Mask off requested but non-supported speeds */
492 status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
493 if (status != IXGBE_SUCCESS)
499 * Try each speed one by one, highest priority first. We do this in
500 * software because 10gb fiber doesn't support speed autonegotiation.
502 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
504 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
506 /* If we already have link at this speed, just jump out */
507 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
508 if (status != IXGBE_SUCCESS)
511 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
514 /* Set hardware SDP's */
/* SDP5 high selects the 10G path on the module */
515 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
516 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
518 /* Allow module to change analog characteristics (1G->10G) */
521 status = ixgbe_setup_mac_link_speed_82599(
522 hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg,
523 autoneg_wait_to_complete);
524 if (status != IXGBE_SUCCESS)
529 /* If we have link, just jump out */
530 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
531 if (status != IXGBE_SUCCESS)
538 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
540 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
541 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
543 /* If we already have link at this speed, just jump out */
544 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
545 if (status != IXGBE_SUCCESS)
548 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
551 /* Set hardware SDP's */
/* SDP5 low (direction still output) selects the 1G path */
552 esdp_reg &= ~IXGBE_ESDP_SDP5;
553 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
554 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
556 /* Allow module to change analog characteristics (10G->1G) */
559 status = ixgbe_setup_mac_link_speed_82599(
560 hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
561 autoneg_wait_to_complete);
562 if (status != IXGBE_SUCCESS)
567 /* If we have link, just jump out */
568 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
569 if (status != IXGBE_SUCCESS)
577 * We didn't get link. Configure back to the highest speed we tried,
578 * (if there was more than one). We call ourselves back with just the
579 * single highest speed that the user requested.
582 status = ixgbe_setup_mac_link_speed_multispeed_fiber(hw,
583 highest_link_speed, autoneg, autoneg_wait_to_complete);
590 * ixgbe_check_mac_link_82599 - Determine link and speed status
591 * @hw: pointer to hardware structure
592 * @speed: pointer to link speed
593 * @link_up: TRUE when link is up
594 * @link_up_wait_to_complete: bool used to wait for link up or not
596 * Reads the links register to determine if link is up and the current speed
598 s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
599 bool *link_up, bool link_up_wait_to_complete)
/* NOTE(review): links_reg/i declarations, the *link_up assignments,
 * loop delay, and closing braces are elided in this listing. */
604 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
605 if (link_up_wait_to_complete) {
/* Poll LINKS.UP for up to IXGBE_LINK_UP_TIME iterations */
606 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
607 if (links_reg & IXGBE_LINKS_UP) {
614 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
617 if (links_reg & IXGBE_LINKS_UP)
/* Decode the 2-bit speed field: 10G, 1G, else 100M */
623 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
624 IXGBE_LINKS_SPEED_10G_82599)
625 *speed = IXGBE_LINK_SPEED_10GB_FULL;
626 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
627 IXGBE_LINKS_SPEED_1G_82599)
628 *speed = IXGBE_LINK_SPEED_1GB_FULL;
630 *speed = IXGBE_LINK_SPEED_100_FULL;
632 /* if link is down, zero out the current_mode */
633 if (*link_up == FALSE) {
634 hw->fc.current_mode = ixgbe_fc_none;
635 hw->fc.fc_was_autonegged = FALSE;
638 return IXGBE_SUCCESS;
642 * ixgbe_setup_mac_link_speed_82599 - Set MAC link speed
643 * @hw: pointer to hardware structure
644 * @speed: new link speed
645 * @autoneg: TRUE if autonegotiation enabled
646 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
648 * Set the link speed in the AUTOC register and restarts link.
650 s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
651 ixgbe_link_speed speed, bool autoneg,
652 bool autoneg_wait_to_complete)
654 s32 status = IXGBE_SUCCESS;
655 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
656 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
658 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
659 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
660 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
663 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
/* NOTE(review): orig_autoc/links_reg/i declarations, goto targets, brace
 * closures, the AUTOC2 write for the SFI switch, loop delay, and the final
 * return are elided in this listing — confirm against the complete
 * source. */
665 /* Check to see if speed passed in is supported. */
666 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
667 if (status != IXGBE_SUCCESS)
670 speed &= link_capabilities;
672 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
673 if (hw->mac.orig_link_settings_stored)
674 orig_autoc = hw->mac.orig_autoc;
679 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
680 status = IXGBE_ERR_LINK_SETUP;
681 } else if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
682 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
683 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
684 /* Set KX4/KX/KR support according to speed requested */
685 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
686 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
687 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
688 autoc |= IXGBE_AUTOC_KX4_SUPP;
689 if (orig_autoc & IXGBE_AUTOC_KR_SUPP)
690 autoc |= IXGBE_AUTOC_KR_SUPP;
691 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
692 autoc |= IXGBE_AUTOC_KX_SUPP;
693 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
694 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
695 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
696 /* Switch from 1G SFI to 10G SFI if requested */
697 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
698 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
699 autoc &= ~IXGBE_AUTOC_LMS_MASK;
700 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
702 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
703 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
704 /* Switch from 10G SFI to 1G SFI if requested */
705 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
706 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
707 autoc &= ~IXGBE_AUTOC_LMS_MASK;
709 autoc |= IXGBE_AUTOC_LMS_1G_AN;
711 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
/* Commit the new AUTOC value and restart autoneg */
715 if (status == IXGBE_SUCCESS) {
717 autoc |= IXGBE_AUTOC_AN_RESTART;
718 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
720 /* Only poll for autoneg to complete if specified to do so */
721 if (autoneg_wait_to_complete) {
722 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
723 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
724 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
725 links_reg = 0; /*Just in case Autoneg time=0*/
726 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
728 IXGBE_READ_REG(hw, IXGBE_LINKS);
729 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
733 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
735 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
736 DEBUGOUT("Autoneg did not complete.\n");
741 /* Add delay to filter out noises during initial link setup */
750 * ixgbe_setup_copper_link_82599 - Setup copper link settings
751 * @hw: pointer to hardware structure
753 * Restarts the link on PHY and then MAC. Performs autonegotiation if needed.
755 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw)
/* NOTE(review): the status declaration, the status check between the two
 * calls, and the return are elided in this listing. */
759 /* Restart autonegotiation on PHY */
760 status = hw->phy.ops.setup_link(hw);
763 ixgbe_setup_mac_link_82599(hw);
769 * ixgbe_setup_copper_link_speed_82599 - Set the PHY autoneg advertised field
770 * @hw: pointer to hardware structure
771 * @speed: new link speed
772 * @autoneg: TRUE if autonegotiation enabled
773 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
775 * Restarts link on PHY and MAC based on settings passed in.
777 static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw,
778 ixgbe_link_speed speed,
780 bool autoneg_wait_to_complete)
/* NOTE(review): the autoneg parameter line, status declaration, and the
 * return are elided in this listing. */
784 /* Setup the PHY according to input speed */
785 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
786 autoneg_wait_to_complete);
788 ixgbe_setup_mac_link_82599(hw);
793 * ixgbe_reset_hw_82599 - Perform hardware reset
794 * @hw: pointer to hardware structure
796 * Resets the hardware by resetting the transmit and receive units, masks
797 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
800 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
802 s32 status = IXGBE_SUCCESS;
/* NOTE(review): ctrl/ctrl_ext/autoc/autoc2/i declarations, goto labels,
 * the per-iteration delay in the reset poll, brace closures, and the final
 * return are elided in this listing — confirm against the complete
 * source. */
808 /* Call adapter stop to disable tx/rx and clear interrupts */
809 hw->mac.ops.stop_adapter(hw);
811 /* PHY ops must be identified and initialized prior to reset */
813 /* Identify PHY and related function pointers */
814 status = hw->phy.ops.init(hw);
816 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
820 /* Setup SFP module if there is one present. */
821 if (hw->phy.sfp_setup_needed) {
822 status = hw->mac.ops.setup_sfp(hw);
/* One-shot: SFP sequence only needs to run once per detection */
823 hw->phy.sfp_setup_needed = FALSE;
826 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
830 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
831 hw->phy.ops.reset(hw);
834 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
835 * access and verify no pending requests before reset
837 status = ixgbe_disable_pcie_master(hw);
838 if (status != IXGBE_SUCCESS) {
839 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
840 DEBUGOUT("PCI-E Master disable polling has failed.\n");
844 * Issue global reset to the MAC. This needs to be a SW reset.
845 * If link reset is used, it might reset the MAC when mng is using it
847 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
848 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
849 IXGBE_WRITE_FLUSH(hw);
851 /* Poll for reset bit to self-clear indicating reset is complete */
852 for (i = 0; i < 10; i++) {
854 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
855 if (!(ctrl & IXGBE_CTRL_RST))
858 if (ctrl & IXGBE_CTRL_RST) {
859 status = IXGBE_ERR_RESET_FAILED;
860 DEBUGOUT("Reset polling failed to complete.\n");
862 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
863 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
864 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
865 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
872 * Store the original AUTOC/AUTOC2 values if they have not been
873 * stored off yet. Otherwise restore the stored original
874 * values since the reset operation sets back to defaults.
876 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
877 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
878 if (hw->mac.orig_link_settings_stored == FALSE) {
879 hw->mac.orig_autoc = autoc;
880 hw->mac.orig_autoc2 = autoc2;
881 hw->mac.orig_link_settings_stored = TRUE;
883 if (autoc != hw->mac.orig_autoc)
884 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
885 IXGBE_AUTOC_AN_RESTART));
/* Only the upper (link-related) AUTOC2 bits are restored */
887 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
888 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
889 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
890 autoc2 |= (hw->mac.orig_autoc2 &
891 IXGBE_AUTOC2_UPPER_MASK);
892 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
897 * Store MAC address from RAR0, clear receive address registers, and
898 * clear the multicast table. Also reset num_rar_entries to 128,
899 * since we modify this value when programming the SAN MAC address.
901 hw->mac.num_rar_entries = 128;
902 hw->mac.ops.init_rx_addrs(hw);
904 /* Store the permanent mac address */
905 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
907 /* Store the permanent SAN mac address */
908 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
910 /* Add the SAN MAC address to the RAR only if it's a valid address */
911 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
912 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
913 hw->mac.san_addr, 0, IXGBE_RAH_AV);
915 /* Reserve the last RAR for the SAN MAC address */
916 hw->mac.num_rar_entries--;
924 * ixgbe_insert_mac_addr_82599 - Find a RAR for this mac address
925 * @hw: pointer to hardware structure
926 * @addr: Address to put into receive address register
927 * @vmdq: VMDq pool to assign
929 * Puts an ethernet address into a receive address register, or
930 * finds the rar that it is aleady in; adds to the pool list
932 s32 ixgbe_insert_mac_addr_82599(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
934 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
935 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
937 u32 rar_low, rar_high;
938 u32 addr_low, addr_high;
/* NOTE(review): the rar declaration, the middle of the addr_low byte
 * swap (addr[2]/addr[3] shifts), the "if (rar == 0)" guard before the
 * final clear_vmdq, and the return are elided in this listing. */
940 /* swap bytes for HW little endian */
941 addr_low = addr[0] | (addr[1] << 8)
944 addr_high = addr[4] | (addr[5] << 8);
947 * Either find the mac_id in rar or find the first empty space.
948 * rar_highwater points to just after the highest currently used
949 * rar in order to shorten the search. It grows when we add a new
952 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
953 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
/* Remember the first disabled (AV clear) slot as a fallback */
955 if (((IXGBE_RAH_AV & rar_high) == 0)
956 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
957 first_empty_rar = rar;
958 } else if ((rar_high & 0xFFFF) == addr_high) {
959 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
960 if (rar_low == addr_low)
961 break; /* found it already in the rars */
965 if (rar < hw->mac.rar_highwater) {
966 /* already there so just add to the pool bits */
967 ixgbe_set_vmdq(hw, rar, vmdq);
968 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
969 /* stick it into first empty RAR slot we found */
970 rar = first_empty_rar;
971 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
972 } else if (rar == hw->mac.rar_highwater) {
973 /* add it to the top of the list and inc the highwater mark */
974 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
975 hw->mac.rar_highwater++;
976 } else if (rar >= hw->mac.num_rar_entries) {
977 return IXGBE_ERR_INVALID_MAC_ADDR;
981 * If we found rar[0], make sure the default pool bit (we use pool 0)
982 * remains cleared to be sure default pool packets will get delivered
985 ixgbe_clear_vmdq(hw, rar, 0);
991 * ixgbe_clear_vmdq_82599 - Disassociate a VMDq pool index from a rx address
992 * @hw: pointer to hardware struct
993 * @rar: receive address register index to disassociate
994 * @vmdq: VMDq pool index to remove from the rar
996 s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
998 u32 mpsar_lo, mpsar_hi;
999 u32 rar_entries = hw->mac.num_rar_entries;
/* NOTE(review): goto targets, brace closures, and the conditions guarding
 * the two CLEAR_VMDQ_ALL writes are elided in this listing. */
1001 if (rar < rar_entries) {
1002 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
1003 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
/* No pools reference this RAR — nothing to clear */
1005 if (!mpsar_lo && !mpsar_hi)
1008 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
1010 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
1014 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
1017 } else if (vmdq < 32) {
1018 mpsar_lo &= ~(1 << vmdq);
1019 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
1021 mpsar_hi &= ~(1 << (vmdq - 32));
1022 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
1025 /* was that the last pool using this rar? */
1026 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
1027 hw->mac.ops.clear_rar(hw, rar);
1029 DEBUGOUT1("RAR index %d is out of range.\n", rar);
1033 return IXGBE_SUCCESS;
1037 * ixgbe_set_vmdq_82599 - Associate a VMDq pool index with a rx address
1038 * @hw: pointer to hardware struct
1039 * @rar: receive address register index to associate with a VMDq index
1040 * @vmdq: VMDq pool index
1042 s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* NOTE(review): the mpsar declaration, the vmdq < 32 branch condition,
 * the low-word "mpsar |= 1 << vmdq" set, and brace closures are elided
 * in this listing. */
1045 u32 rar_entries = hw->mac.num_rar_entries;
1047 if (rar < rar_entries) {
/* Pools 0-31 live in MPSAR_LO, 32-63 in MPSAR_HI */
1049 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
1051 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
1053 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
1054 mpsar |= 1 << (vmdq - 32);
1055 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
1058 DEBUGOUT1("RAR index %d is out of range.\n", rar);
1060 return IXGBE_SUCCESS;
1064 * ixgbe_set_vfta_82599 - Set VLAN filter table
1065 * @hw: pointer to hardware structure
1066 * @vlan: VLAN id to write to VLAN filter
1067 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
1068 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
1070 * Turn on/off specified VLAN in the VLAN filter table.
1072 s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* NOTE(review): many lines are elided in this listing — the vlan_on
 * parameter line, regindex/bitindex/bits declarations, the vlan > 4095
 * range check, the vlan_on conditions around the VFTA and VLVF writes,
 * goto labels, and the vind < 32 branch conditions. Confirm against the
 * complete source. */
1078 u32 first_empty_slot;
1081 return IXGBE_ERR_PARAM;
1084 * this is a 2 part operation - first the VFTA, then the
1085 * VLVF and VLVFB if vind is set
1089 * The VFTA is a bitstring made up of 128 32-bit registers
1090 * that enable the particular VLAN id, much like the MTA:
1091 * bits[11-5]: which register
1092 * bits[4-0]: which bit in the register
1094 regindex = (vlan >> 5) & 0x7F;
1095 bitindex = vlan & 0x1F;
1096 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1098 bits |= (1 << bitindex);
1100 bits &= ~(1 << bitindex);
1101 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1105 * If the vind is set
1107 * make sure the vlan is in VLVF
1108 * set the vind bit in the matching VLVFB
1110 * clear the pool bit and possibly the vind
1113 /* find the vlanid or the first empty slot */
1114 first_empty_slot = 0;
/* Slot 0 is skipped so that 0 can mean "no empty slot found" */
1116 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
1117 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
1118 if (!bits && !first_empty_slot)
1119 first_empty_slot = regindex;
1120 else if ((bits & 0x0FFF) == vlan)
1124 if (regindex >= IXGBE_VLVF_ENTRIES) {
1125 if (first_empty_slot)
1126 regindex = first_empty_slot;
1128 DEBUGOUT("No space in VLVF.\n");
1135 /* set the pool bit */
/* Each VLVF entry owns two VLVFB words: pools 0-31 and 32-63 */
1138 IXGBE_READ_REG(hw, IXGBE_VLVFB(regindex*2));
1139 bits |= (1 << vind);
1141 IXGBE_VLVFB(regindex*2), bits);
1143 bits = IXGBE_READ_REG(hw,
1144 IXGBE_VLVFB((regindex*2)+1));
1145 bits |= (1 << vind);
1147 IXGBE_VLVFB((regindex*2)+1), bits);
1150 /* clear the pool bit */
1152 bits = IXGBE_READ_REG(hw,
1153 IXGBE_VLVFB(regindex*2));
1154 bits &= ~(1 << vind);
1156 IXGBE_VLVFB(regindex*2), bits);
/* OR in the sibling word to learn if any pool still uses the VLAN */
1157 bits |= IXGBE_READ_REG(hw,
1158 IXGBE_VLVFB((regindex*2)+1));
1160 bits = IXGBE_READ_REG(hw,
1161 IXGBE_VLVFB((regindex*2)+1));
1162 bits &= ~(1 << vind);
1164 IXGBE_VLVFB((regindex*2)+1), bits);
1165 bits |= IXGBE_READ_REG(hw,
1166 IXGBE_VLVFB(regindex*2));
/* Keep the VLVF entry valid while any pool bit remains set */
1171 IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
1172 (IXGBE_VLVF_VIEN | vlan));
1174 IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
1178 return IXGBE_SUCCESS;
1182 * ixgbe_clear_vfta_82599 - Clear VLAN filter table
1183 * @hw: pointer to hardware structure
1185 * Clears the VLAN filer table, and the VMDq index associated with the filter
1187 s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
1191 for (offset = 0; offset < hw->mac.vft_size; offset++)
1192 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1194 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
1195 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
1196 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
1197 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
1200 return IXGBE_SUCCESS;
1204 * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
1205 * @hw: pointer to hardware structure
1207 s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
1210 DEBUGOUT(" Clearing UTA\n");
1212 for (i = 0; i < 128; i++)
1213 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
1215 return IXGBE_SUCCESS;
1219 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1220 * @hw: pointer to hardware structure
1222 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1224 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1225 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1226 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1227 IXGBE_WRITE_FLUSH(hw);
1228 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1230 return IXGBE_SUCCESS;
1233 #define IXGBE_FDIR_INIT_DONE_POLL 10
1235 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1236 * @hw: pointer to hardware structure
1237 * @pballoc: which mode to allocate filters with
1239 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1246 * Before enabling Flow Director, the Rx Packet Buffer size
1247 * must be reduced. The new value is the current size minus
1248 * flow director memory usage size.
1250 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1251 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1252 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1255 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1256 * intialized to zero for non DCB mode otherwise actual total RX PB
1257 * would be bigger than programmed and filter space would run into
1260 for (i = 1; i < 8; i++)
1261 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1263 /* Send interrupt when 64 filters are left */
1264 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1266 /* Set the maximum length per hash bucket to 0xA filters */
1267 fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
1270 case IXGBE_FDIR_PBALLOC_64K:
1271 /* 8k - 1 signature filters */
1272 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1274 case IXGBE_FDIR_PBALLOC_128K:
1275 /* 16k - 1 signature filters */
1276 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1278 case IXGBE_FDIR_PBALLOC_256K:
1279 /* 32k - 1 signature filters */
1280 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1284 return IXGBE_ERR_CONFIG;
1287 /* Move the flexible bytes to use the ethertype - shift 6 words */
1288 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1291 /* Prime the keys for hashing */
1292 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1293 IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
1294 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1295 IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
1298 * Poll init-done after we write the register. Estimated times:
1299 * 10G: PBALLOC = 11b, timing is 60us
1300 * 1G: PBALLOC = 11b, timing is 600us
1301 * 100M: PBALLOC = 11b, timing is 6ms
1303 * Multiple these timings by 4 if under full Rx load
1305 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1306 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1307 * this might not finish in our poll time, but we can live with that
1310 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1311 IXGBE_WRITE_FLUSH(hw);
1312 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1313 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1314 IXGBE_FDIRCTRL_INIT_DONE)
1318 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1319 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1321 return IXGBE_SUCCESS;
1325 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1326 * @hw: pointer to hardware structure
1327 * @pballoc: which mode to allocate filters with
1329 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1336 * Before enabling Flow Director, the Rx Packet Buffer size
1337 * must be reduced. The new value is the current size minus
1338 * flow director memory usage size.
1341 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1342 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1343 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1346 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1347 * intialized to zero for non DCB mode otherwise actual total RX PB
1348 * would be bigger than programmed and filter space would run into
1351 for (i = 1; i < 8; i++)
1352 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1354 /* Send interrupt when 64 filters are left */
1355 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1358 case IXGBE_FDIR_PBALLOC_64K:
1359 /* 2k - 1 perfect filters */
1360 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1362 case IXGBE_FDIR_PBALLOC_128K:
1363 /* 4k - 1 perfect filters */
1364 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1366 case IXGBE_FDIR_PBALLOC_256K:
1367 /* 8k - 1 perfect filters */
1368 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1372 return IXGBE_ERR_CONFIG;
1375 /* Turn perfect match filtering on */
1376 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
1377 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1379 /* Move the flexible bytes to use the ethertype - shift 6 words */
1380 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1382 /* Prime the keys for hashing */
1383 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1384 IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
1385 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1386 IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
1389 * Poll init-done after we write the register. Estimated times:
1390 * 10G: PBALLOC = 11b, timing is 60us
1391 * 1G: PBALLOC = 11b, timing is 600us
1392 * 100M: PBALLOC = 11b, timing is 6ms
1394 * Multiple these timings by 4 if under full Rx load
1396 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1397 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1398 * this might not finish in our poll time, but we can live with that
1402 /* Set the maximum length per hash bucket to 0xA filters */
1403 fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
1405 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1406 IXGBE_WRITE_FLUSH(hw);
1407 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1408 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1409 IXGBE_FDIRCTRL_INIT_DONE)
1413 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1414 DEBUGOUT("Flow Director Perfect poll time exceeded!\n");
1416 return IXGBE_SUCCESS;
1421 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
1422 * @stream: input bitstream to compute the hash on
1423 * @key: 32-bit hash key
1425 u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
1428 * The algorithm is as follows:
1429 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
1430 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
1431 * and A[n] x B[n] is bitwise AND between same length strings
1433 * K[n] is 16 bits, defined as:
1434 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
1435 * for n modulo 32 < 15, K[n] =
1436 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
1438 * S[n] is 16 bits, defined as:
1439 * for n >= 15, S[n] = S[n:n - 15]
1440 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
1442 * To simplify for programming, the algorithm is implemented
1443 * in software this way:
1445 * Key[31:0], Stream[335:0]
1447 * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
1448 * int_key[350:0] = tmp_key[351:1]
1449 * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
1452 * for (i = 0; i < 351; i++) {
1454 * hash ^= int_stream[(i + 15):i];
1463 u8 *stream = (u8 *)atr_input;
1464 u8 int_key[44]; /* upper-most bit unused */
1465 u8 hash_str[46]; /* upper-most 2 bits unused */
1466 u16 hash_result = 0;
1470 memset(&tmp_key, 0, sizeof(tmp_key));
1471 /* First load the temporary key stream */
1472 for (i = 0; i < 11; i++)
1473 tmp_key.key[i] = key;
1476 * Set the interim key for the hashing. Bit 352 is unused, so we must
1477 * shift and compensate when building the key.
1479 int_key[0] = tmp_key.key_stream[0] >> 1;
1480 for (i = 1, j = 0; i < 44; i++) {
1481 int_key[i] = (tmp_key.key_stream[j] & 0x1) << 7;
1483 int_key[i] |= tmp_key.key_stream[j] >> 1;
1487 * Set the interim bit string for the hashing. Bits 368 and 367 are
1488 * unused, so shift and compensate when building the string.
1490 hash_str[0] = (stream[40] & 0x7f) >> 1;
1491 for (i = 1, j = 40; i < 46; i++) {
1492 hash_str[i] = (stream[j] & 0x1) << 7;
1496 hash_str[i] |= stream[j] >> 1;
1500 * Now compute the hash. i is the index into hash_str, j is into our
1501 * key stream, k is counting the number of bits, and h interates within
1504 for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
1505 for (h = 0; h < 8 && k < 351; h++, k++) {
1506 if ((int_key[j] >> h) & 0x1) {
1508 * Key bit is set, XOR in the current 16-bit
1509 * string. Example of processing:
1511 * tmp = (hash_str[i - 2] & 0 << 16) |
1512 * (hash_str[i - 1] & 0xff << 8) |
1513 * (hash_str[i] & 0xff >> 0)
1514 * So tmp = hash_str[15 + k:k], since the
1515 * i + 2 clause rolls off the 16-bit value
1517 * tmp = (hash_str[i - 2] & 0x7f << 9) |
1518 * (hash_str[i - 1] & 0xff << 1) |
1519 * (hash_str[i] & 0x80 >> 7)
1521 tmp = ((hash_str[i] & (0xff << h)) >> h);
1522 tmp |= ((hash_str[i - 1] & 0xff) << (8 - h));
1523 tmp |= (hash_str[i - 2] & (0xff >> (8 - h)))
1534 * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
1535 * @input: input stream to modify
1536 * @vlan: the VLAN id to load
1538 s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
1540 input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
1541 input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
1543 return IXGBE_SUCCESS;
1547 * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
1548 * @input: input stream to modify
1549 * @src_addr: the IP address to load
1551 s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
1553 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
1554 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
1555 (src_addr >> 16) & 0xff;
1556 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
1557 (src_addr >> 8) & 0xff;
1558 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
1560 return IXGBE_SUCCESS;
1564 * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
1565 * @input: input stream to modify
1566 * @dst_addr: the IP address to load
1568 s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
1570 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
1571 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
1572 (dst_addr >> 16) & 0xff;
1573 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
1574 (dst_addr >> 8) & 0xff;
1575 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
1577 return IXGBE_SUCCESS;
1581 * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
1582 * @input: input stream to modify
1583 * @src_addr_1: the first 4 bytes of the IP address to load
1584 * @src_addr_2: the second 4 bytes of the IP address to load
1585 * @src_addr_3: the third 4 bytes of the IP address to load
1586 * @src_addr_4: the fourth 4 bytes of the IP address to load
1588 s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
1589 u32 src_addr_1, u32 src_addr_2,
1590 u32 src_addr_3, u32 src_addr_4)
1592 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
1593 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
1594 (src_addr_4 >> 8) & 0xff;
1595 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
1596 (src_addr_4 >> 16) & 0xff;
1597 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
1599 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
1600 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
1601 (src_addr_3 >> 8) & 0xff;
1602 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
1603 (src_addr_3 >> 16) & 0xff;
1604 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
1606 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
1607 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
1608 (src_addr_2 >> 8) & 0xff;
1609 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
1610 (src_addr_2 >> 16) & 0xff;
1611 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
1613 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
1614 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
1615 (src_addr_1 >> 8) & 0xff;
1616 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
1617 (src_addr_1 >> 16) & 0xff;
1618 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
1620 return IXGBE_SUCCESS;
1624 * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
1625 * @input: input stream to modify
1626 * @dst_addr_1: the first 4 bytes of the IP address to load
1627 * @dst_addr_2: the second 4 bytes of the IP address to load
1628 * @dst_addr_3: the third 4 bytes of the IP address to load
1629 * @dst_addr_4: the fourth 4 bytes of the IP address to load
1631 s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
1632 u32 dst_addr_1, u32 dst_addr_2,
1633 u32 dst_addr_3, u32 dst_addr_4)
1635 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
1636 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
1637 (dst_addr_4 >> 8) & 0xff;
1638 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
1639 (dst_addr_4 >> 16) & 0xff;
1640 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
1642 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
1643 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
1644 (dst_addr_3 >> 8) & 0xff;
1645 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
1646 (dst_addr_3 >> 16) & 0xff;
1647 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
1649 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
1650 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
1651 (dst_addr_2 >> 8) & 0xff;
1652 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
1653 (dst_addr_2 >> 16) & 0xff;
1654 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
1656 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
1657 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
1658 (dst_addr_1 >> 8) & 0xff;
1659 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
1660 (dst_addr_1 >> 16) & 0xff;
1661 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
1663 return IXGBE_SUCCESS;
1667 * ixgbe_atr_set_src_port_82599 - Sets the source port
1668 * @input: input stream to modify
1669 * @src_port: the source port to load
1671 s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
1673 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
1674 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
1676 return IXGBE_SUCCESS;
1680 * ixgbe_atr_set_dst_port_82599 - Sets the destination port
1681 * @input: input stream to modify
1682 * @dst_port: the destination port to load
1684 s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
1686 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
1687 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
1689 return IXGBE_SUCCESS;
1693 * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
1694 * @input: input stream to modify
1695 * @flex_bytes: the flexible bytes to load
1697 s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
1699 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
1700 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
1702 return IXGBE_SUCCESS;
1706 * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
1707 * @input: input stream to modify
1708 * @vm_pool: the Virtual Machine pool to load
1710 s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
1712 input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
1714 return IXGBE_SUCCESS;
1718 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
1719 * @input: input stream to modify
1720 * @l4type: the layer 4 type value to load
1722 s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
1724 input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
1726 return IXGBE_SUCCESS;
1730 * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
1731 * @input: input stream to search
1732 * @vlan: the VLAN id to load
1734 s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
1736 *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
1737 *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
1739 return IXGBE_SUCCESS;
1743 * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
1744 * @input: input stream to search
1745 * @src_addr: the IP address to load
1747 s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
1749 *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
1750 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
1751 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
1752 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
1754 return IXGBE_SUCCESS;
1758 * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
1759 * @input: input stream to search
1760 * @dst_addr: the IP address to load
1762 s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr)
1764 *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
1765 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
1766 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
1767 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
1769 return IXGBE_SUCCESS;
1773 * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
1774 * @input: input stream to search
1775 * @src_addr_1: the first 4 bytes of the IP address to load
1776 * @src_addr_2: the second 4 bytes of the IP address to load
1777 * @src_addr_3: the third 4 bytes of the IP address to load
1778 * @src_addr_4: the fourth 4 bytes of the IP address to load
1780 s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
1781 u32 *src_addr_1, u32 *src_addr_2,
1782 u32 *src_addr_3, u32 *src_addr_4)
1784 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
1785 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
1786 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
1787 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
1789 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
1790 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
1791 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
1792 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
1794 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
1795 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
1796 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
1797 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
1799 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
1800 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
1801 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
1802 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
1804 return IXGBE_SUCCESS;
1808 * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
1809 * @input: input stream to search
1810 * @dst_addr_1: the first 4 bytes of the IP address to load
1811 * @dst_addr_2: the second 4 bytes of the IP address to load
1812 * @dst_addr_3: the third 4 bytes of the IP address to load
1813 * @dst_addr_4: the fourth 4 bytes of the IP address to load
1815 s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
1816 u32 *dst_addr_1, u32 *dst_addr_2,
1817 u32 *dst_addr_3, u32 *dst_addr_4)
1819 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
1820 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
1821 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
1822 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
1824 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
1825 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
1826 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
1827 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
1829 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
1830 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
1831 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
1832 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
1834 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
1835 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
1836 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
1837 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
1839 return IXGBE_SUCCESS;
1843 * ixgbe_atr_get_src_port_82599 - Gets the source port
1844 * @input: input stream to modify
1845 * @src_port: the source port to load
1847 * Even though the input is given in big-endian, the FDIRPORT registers
1848 * expect the ports to be programmed in little-endian. Hence the need to swap
1849 * endianness when retrieving the data. This can be confusing since the
1850 * internal hash engine expects it to be big-endian.
1852 s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port)
1854 *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
1855 *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
1857 return IXGBE_SUCCESS;
1861 * ixgbe_atr_get_dst_port_82599 - Gets the destination port
1862 * @input: input stream to modify
1863 * @dst_port: the destination port to load
1865 * Even though the input is given in big-endian, the FDIRPORT registers
1866 * expect the ports to be programmed in little-endian. Hence the need to swap
1867 * endianness when retrieving the data. This can be confusing since the
1868 * internal hash engine expects it to be big-endian.
1870 s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port)
1872 *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
1873 *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
1875 return IXGBE_SUCCESS;
1879 * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
1880 * @input: input stream to modify
1881 * @flex_bytes: the flexible bytes to load
1883 s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte)
1885 *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
1886 *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
1888 return IXGBE_SUCCESS;
1892 * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
1893 * @input: input stream to modify
1894 * @vm_pool: the Virtual Machine pool to load
1896 s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool)
1898 *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
1900 return IXGBE_SUCCESS;
1904 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
1905 * @input: input stream to modify
1906 * @l4type: the layer 4 type value to load
1908 s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
1910 *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
1912 return IXGBE_SUCCESS;
1916 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1917 * @hw: pointer to hardware structure
1918 * @stream: input bitstream
1919 * @queue: queue index to direct traffic to
1921 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1922 struct ixgbe_atr_input *input,
1928 u16 bucket_hash, sig_hash;
1931 bucket_hash = ixgbe_atr_compute_hash_82599(input,
1932 IXGBE_ATR_BUCKET_HASH_KEY);
1934 /* bucket_hash is only 15 bits */
1935 bucket_hash &= IXGBE_ATR_HASH_MASK;
1937 sig_hash = ixgbe_atr_compute_hash_82599(input,
1938 IXGBE_ATR_SIGNATURE_HASH_KEY);
1940 /* Get the l4type in order to program FDIRCMD properly */
1941 /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
1942 ixgbe_atr_get_l4type_82599(input, &l4type);
1945 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1946 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1948 fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1950 fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1951 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
1953 switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
1954 case IXGBE_ATR_L4TYPE_TCP:
1955 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1957 case IXGBE_ATR_L4TYPE_UDP:
1958 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1960 case IXGBE_ATR_L4TYPE_SCTP:
1961 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1964 DEBUGOUT(" Error on l4type input\n");
1965 return IXGBE_ERR_CONFIG;
1968 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
1969 fdircmd |= IXGBE_FDIRCMD_IPV6;
1971 fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
1972 fdirhashcmd = ((fdircmd << 32) | fdirhash);
1974 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF);
1975 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1977 return IXGBE_SUCCESS;
1981 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1982 * @hw: pointer to hardware structure
1983 * @input: input bitstream
1984 * @queue: queue index to direct traffic to
1986 * Note that the caller to this function must lock before calling, since the
1987 * hardware writes must be protected from one another.
1989 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1990 struct ixgbe_atr_input *input,
1996 u32 src_ipv4, dst_ipv4;
1997 u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
1998 u16 src_port, dst_port, vlan_id, flex_bytes;
2002 /* Get our input values */
2003 ixgbe_atr_get_l4type_82599(input, &l4type);
2006 * Check l4type formatting, and bail out before we touch the hardware
2007 * if there's a configuration issue
2009 switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
2010 case IXGBE_ATR_L4TYPE_TCP:
2011 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
2013 case IXGBE_ATR_L4TYPE_UDP:
2014 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
2016 case IXGBE_ATR_L4TYPE_SCTP:
2017 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
2020 DEBUGOUT(" Error on l4type input\n");
2021 return IXGBE_ERR_CONFIG;
2024 bucket_hash = ixgbe_atr_compute_hash_82599(input,
2025 IXGBE_ATR_BUCKET_HASH_KEY);
2027 /* bucket_hash is only 15 bits */
2028 bucket_hash &= IXGBE_ATR_HASH_MASK;
2030 ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
2031 ixgbe_atr_get_src_port_82599(input, &src_port);
2032 ixgbe_atr_get_dst_port_82599(input, &dst_port);
2033 ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
2035 fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
2037 /* Now figure out if we're IPv4 or IPv6 */
2038 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
2040 ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
2041 &src_ipv6_3, &src_ipv6_4);
2043 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
2044 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
2045 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
2046 /* The last 4 bytes is the same register as IPv4 */
2047 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
2049 fdircmd |= IXGBE_FDIRCMD_IPV6;
2050 fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
2053 ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
2054 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
2058 ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
2059 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
2061 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
2062 (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
2063 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
2064 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
2066 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
2067 fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
2068 fdircmd |= IXGBE_FDIRCMD_LAST;
2069 fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
2070 fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
2072 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
2073 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
2075 return IXGBE_SUCCESS;
2079 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2080 * @hw: pointer to hardware structure
2081 * @reg: analog register to read
2084 * Performs read operation to Omer analog register specified.
2086 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2090 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2092 IXGBE_WRITE_FLUSH(hw);
2094 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2095 *val = (u8)core_ctl;
2097 return IXGBE_SUCCESS;
2101 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2102 * @hw: pointer to hardware structure
2103 * @reg: atlas register to write
2104 * @val: value to write
2106 * Performs write operation to Omer analog register specified.
2108 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2112 core_ctl = (reg << 8) | val;
2113 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2114 IXGBE_WRITE_FLUSH(hw);
2117 return IXGBE_SUCCESS;
2121 * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
2122 * @hw: pointer to hardware structure
2124 * Starts the hardware using the generic start_hw function.
2125 * Then performs revision-specific operations:
2126 * Clears the rate limiter registers.
2128 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
2131 s32 ret_val = IXGBE_SUCCESS;
2133 ret_val = ixgbe_start_hw_generic(hw);
2135 /* Clear the rate limiters */
2136 for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) {
2137 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, q_num);
2138 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
2140 IXGBE_WRITE_FLUSH(hw);
2146 * ixgbe_identify_phy_82599 - Get physical layer module
2147 * @hw: pointer to hardware structure
2149 * Determines the physical layer module found on the current adapter.
2150 * If PHY already detected, maintains current PHY type in hw struct,
2151 * otherwise executes the PHY detection routine.
2153 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2155 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2157 /* Detect PHY if not unknown - returns success if already detected. */
2158 status = ixgbe_identify_phy_generic(hw);
2159 if (status != IXGBE_SUCCESS)
2160 status = ixgbe_identify_sfp_module_generic(hw);
2161 /* Set PHY type none if no PHY detected */
2162 if (hw->phy.type == ixgbe_phy_unknown) {
2163 hw->phy.type = ixgbe_phy_none;
2164 status = IXGBE_SUCCESS;
2167 /* Return error if SFP module has been detected but is not supported */
2168 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2169 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
2175 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2176 * @hw: pointer to hardware structure
2178 * Determines physical layer capabilities of the current configuration.
2180 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2182 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2183 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2184 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2185 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2186 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2187 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2188 u16 ext_ability = 0;
2189 u8 comp_codes_10g = 0;
2191 hw->phy.ops.identify(hw);
2193 if (hw->phy.type == ixgbe_phy_tn ||
2194 hw->phy.type == ixgbe_phy_aq ||
2195 hw->phy.type == ixgbe_phy_cu_unknown) {
2196 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2197 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2198 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2199 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2200 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2201 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2202 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2203 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2207 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2208 case IXGBE_AUTOC_LMS_1G_AN:
2209 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2210 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2211 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2212 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2215 /* SFI mode so read SFP module */
2218 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2219 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2220 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2221 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2222 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2225 case IXGBE_AUTOC_LMS_10G_SERIAL:
2226 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2227 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2229 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2232 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2233 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2234 if (autoc & IXGBE_AUTOC_KX_SUPP)
2235 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2236 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2237 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2238 if (autoc & IXGBE_AUTOC_KR_SUPP)
2239 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2248 /* SFP check must be done last since DA modules are sometimes used to
2249 * test KR mode - we need to id KR mode correctly before SFP module.
2250 * Call identify_sfp because the pluggable module may have changed */
2251 hw->phy.ops.identify_sfp(hw);
2252 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2255 switch (hw->phy.type) {
2256 case ixgbe_phy_tw_tyco:
2257 case ixgbe_phy_tw_unknown:
2258 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2260 case ixgbe_phy_sfp_avago:
2261 case ixgbe_phy_sfp_ftl:
2262 case ixgbe_phy_sfp_intel:
2263 case ixgbe_phy_sfp_unknown:
2264 hw->phy.ops.read_i2c_eeprom(hw,
2265 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2266 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2267 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2268 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2269 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2276 return physical_layer;
2280 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2281 * @hw: pointer to hardware structure
2282 * @regval: register value to write to RXCTRL
2284 * Enables the Rx DMA unit for 82599
2286 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2288 #define IXGBE_MAX_SECRX_POLL 30
2293 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2294 * If traffic is incoming before we enable the Rx unit, it could hang
2295 * the Rx DMA unit. Therefore, make sure the security engine is
2296 * completely disabled prior to enabling the Rx unit.
2298 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2299 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2300 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2301 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2302 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2303 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2306 /* Use interrupt-safe sleep just in case */
2310 /* For informational purposes only */
2311 if (i >= IXGBE_MAX_SECRX_POLL)
2312 DEBUGOUT("Rx unit being enabled before security "
2313 "path fully disabled. Continuing with init.\n");
2315 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2316 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2317 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2318 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2319 IXGBE_WRITE_FLUSH(hw);
2321 return IXGBE_SUCCESS;
2325 * ixgbe_get_device_caps_82599 - Get additional device capabilities
2326 * @hw: pointer to hardware structure
2327 * @device_caps: the EEPROM word with the extra device capabilities
2329 * This function will read the EEPROM location for the device capabilities,
2330 * and return the word through device_caps.
2332 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
2334 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
2336 return IXGBE_SUCCESS;
2340 * ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599
2341 * @hw: pointer to hardware structure
2342 * @san_mac_offset: SAN MAC address offset
2344 * This function will read the EEPROM location for the SAN MAC address
2345 * pointer, and returns the value at that location. This is used in both
2346 * get and set mac_addr routines.
2348 s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
2349 u16 *san_mac_offset)
2352 * First read the EEPROM pointer to see if the MAC addresses are
2355 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
2357 return IXGBE_SUCCESS;
2361 * ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599
2362 * @hw: pointer to hardware structure
2363 * @san_mac_addr: SAN MAC address
2365 * Reads the SAN MAC address from the EEPROM, if it's available. This is
2366 * per-port, so set_lan_id() must be called before reading the addresses.
2367 * set_lan_id() is called by identify_sfp(), but this cannot be relied
2368 * upon for non-SFP connections, so we must call it here.
2370 s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
2372 u16 san_mac_data, san_mac_offset;
2376 * First read the EEPROM pointer to see if the MAC addresses are
2377 * available. If they're not, no point in calling set_lan_id() here.
2379 ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
2381 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2383 * No addresses available in this EEPROM. It's not an
2384 * error though, so just wipe the local address and return.
2386 for (i = 0; i < 6; i++)
2387 san_mac_addr[i] = 0xFF;
2389 goto san_mac_addr_out;
2392 /* make sure we know which port we need to program */
2393 hw->mac.ops.set_lan_id(hw);
2394 /* apply the port offset to the address offset */
2395 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2396 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2397 for (i = 0; i < 3; i++) {
2398 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
2399 san_mac_addr[i * 2] = (u8)(san_mac_data);
2400 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2405 return IXGBE_SUCCESS;
2409 * ixgbe_set_san_mac_addr_82599 - Write the SAN MAC address to the EEPROM
2410 * @hw: pointer to hardware structure
2411 * @san_mac_addr: SAN MAC address
2413 * Write a SAN MAC address to the EEPROM.
2415 s32 ixgbe_set_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
2417 s32 status = IXGBE_SUCCESS;
2418 u16 san_mac_data, san_mac_offset;
2421 /* Look for SAN mac address pointer. If not defined, return */
2422 ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
2424 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2425 status = IXGBE_ERR_NO_SAN_ADDR_PTR;
2426 goto san_mac_addr_out;
2429 /* Make sure we know which port we need to write */
2430 hw->mac.ops.set_lan_id(hw);
2431 /* Apply the port offset to the address offset */
2432 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2433 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2435 for (i = 0; i < 3; i++) {
2436 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
2437 san_mac_data |= (u16)(san_mac_addr[i * 2]);
2438 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);