1 /******************************************************************************
3 Copyright (c) 2001-2011, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 * 82562G 10/100 Network Connection
37 * 82562G-2 10/100 Network Connection
38 * 82562GT 10/100 Network Connection
39 * 82562GT-2 10/100 Network Connection
40 * 82562V 10/100 Network Connection
41 * 82562V-2 10/100 Network Connection
42 * 82566DC-2 Gigabit Network Connection
43 * 82566DC Gigabit Network Connection
44 * 82566DM-2 Gigabit Network Connection
45 * 82566DM Gigabit Network Connection
46 * 82566MC Gigabit Network Connection
47 * 82566MM Gigabit Network Connection
48 * 82567LM Gigabit Network Connection
49 * 82567LF Gigabit Network Connection
50 * 82567V Gigabit Network Connection
51 * 82567LM-2 Gigabit Network Connection
52 * 82567LF-2 Gigabit Network Connection
53 * 82567V-2 Gigabit Network Connection
54 * 82567LF-3 Gigabit Network Connection
55 * 82567LM-3 Gigabit Network Connection
56 * 82567LM-4 Gigabit Network Connection
57 * 82577LM Gigabit Network Connection
58 * 82577LC Gigabit Network Connection
59 * 82578DM Gigabit Network Connection
60 * 82578DC Gigabit Network Connection
61 * 82579LM Gigabit Network Connection
62 * 82579V Gigabit Network Connection
65 #include "e1000_api.h"
67 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
68 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
69 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
70 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
71 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
72 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
73 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
74 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
75 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
76 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
77 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
78 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
81 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
82 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
83 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
84 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
86 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
88 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
89 u16 words, u16 *data);
90 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
91 u16 words, u16 *data);
92 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
93 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
94 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
96 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
97 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
98 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
99 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
100 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
101 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
102 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
103 u16 *speed, u16 *duplex);
104 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
105 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
106 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
107 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
108 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
109 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
110 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
111 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
112 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
113 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
114 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
115 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
116 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
117 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
118 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
119 u32 offset, u8 *data);
120 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
122 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
123 u32 offset, u16 *data);
124 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
125 u32 offset, u8 byte);
126 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
127 u32 offset, u8 data);
128 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
130 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
131 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
132 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
133 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
134 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
135 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138 #if defined(NAHUM6_HW) && (defined(LTR_SUPPORT) || defined(OBFF_SUPPORT))
140 #endif /* NAHUM6_HW && (LTR_SUPPORT || OBFF_SUPPORT) */
142 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
143 /* Offset 04h HSFSTS */
144 union ich8_hws_flash_status {
146 u16 flcdone:1; /* bit 0 Flash Cycle Done */
147 u16 flcerr:1; /* bit 1 Flash Cycle Error */
148 u16 dael:1; /* bit 2 Direct Access error Log */
149 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
150 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
151 u16 reserved1:2; /* bit 7:6 Reserved */
152 u16 reserved2:6; /* bit 13:8 Reserved */
153 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
154 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
159 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
160 /* Offset 06h FLCTL */
161 union ich8_hws_flash_ctrl {
162 struct ich8_hsflctl {
163 u16 flcgo:1; /* 0 Flash Cycle Go */
164 u16 flcycle:2; /* 2:1 Flash Cycle */
165 u16 reserved:5; /* 7:3 Reserved */
166 u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
167 u16 flockdn:6; /* 15:10 Reserved (field name suggests bit 15 is Flash Config Lock-Down — TODO confirm vs. datasheet) */
172 /* ICH Flash Region Access Permissions */
/* One byte each of read/write permission and master read/write grant
 * for the GbE region of the shared SPI flash.
 */
173 union ich8_hws_flash_regacc {
175 u32 grra:8; /* 0:7 GbE region Read Access */
176 u32 grwa:8; /* 8:15 GbE region Write Access */
177 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
178 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
/*
 * e1000_toggle_lanphypc_value_ich8lan - toggle the LANPHYPC pin value
 * @hw: pointer to the HW structure
 *
 * Asserts the LANPHYPC override in CTRL with the value bit cleared,
 * flushes the write, then drops the override.  Per the caller in
 * e1000_init_phy_params_pchlan, this forces the MAC-PHY interconnect
 * out of SMBus mode and back to PCIe mode after an Sx->S0 transition.
 */
183 static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
187 DEBUGFUNC("e1000_toggle_lanphypc_value_ich8lan");
189 ctrl = E1000_READ_REG(hw, E1000_CTRL);
190 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
191 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
192 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
193 E1000_WRITE_FLUSH(hw);
195 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
196 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
200 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
201 * @hw: pointer to the HW structure
203 * Initialize family-specific PHY parameters and function pointers.
205 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
207 struct e1000_phy_info *phy = &hw->phy;
208 s32 ret_val = E1000_SUCCESS;
210 DEBUGFUNC("e1000_init_phy_params_pchlan");
213 phy->reset_delay_us = 100;
/* PCH parts use the HV (82577/82578-style) PHY register access routines
 * and the ICH software-flag semaphore for PHY/NVM arbitration.
 */
215 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
216 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
217 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
218 phy->ops.set_page = e1000_set_page_igp;
219 phy->ops.read_reg = e1000_read_phy_reg_hv;
220 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
221 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
222 phy->ops.release = e1000_release_swflag_ich8lan;
223 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
224 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
225 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
226 phy->ops.write_reg = e1000_write_phy_reg_hv;
227 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
228 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
229 phy->ops.power_up = e1000_power_up_phy_copper;
230 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
231 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
/* Only touch the hardware when firmware is not blocking PHY resets. */
233 if (!hw->phy.ops.check_reset_block(hw)) {
234 u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
237 * The MAC-PHY interconnect may still be in SMBus mode after
238 * Sx->S0. If resetting the PHY is not blocked, toggle the
239 * LANPHYPC Value bit to force the interconnect to PCIe mode.
241 e1000_toggle_lanphypc_value_ich8lan(hw);
245 * Gate automatic PHY configuration by hardware on
248 if ((hw->mac.type == e1000_pch2lan) &&
249 !(fwsm & E1000_ICH_FWSM_FW_VALID))
250 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
253 * Reset the PHY before any access to it. Doing so, ensures
254 * that the PHY is in a known good state before we read/write
255 * PHY registers. The generic reset is sufficient here,
256 * because we haven't determined the PHY type yet.
258 ret_val = e1000_phy_hw_reset_generic(hw);
262 /* Ungate automatic PHY configuration on non-managed 82579 */
263 if ((hw->mac.type == e1000_pch2lan) &&
264 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
266 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
/* Identify the PHY; if the id read fails, retry in MDIO slow mode
 * (see comment below), then derive the PHY type from the id.
 */
270 phy->id = e1000_phy_unknown;
271 switch (hw->mac.type) {
273 ret_val = e1000_get_phy_id(hw);
276 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
281 * In case the PHY needs to be in mdio slow mode,
282 * set slow mode and try to get the PHY id again.
284 ret_val = e1000_set_mdio_slow_mode_hv(hw);
287 ret_val = e1000_get_phy_id(hw);
292 phy->type = e1000_get_phy_type_from_id(phy->id);
/* Hook up PHY-type-specific helpers (82577/82579 vs. 82578/M88). */
295 case e1000_phy_82577:
296 case e1000_phy_82579:
297 phy->ops.check_polarity = e1000_check_polarity_82577;
298 phy->ops.force_speed_duplex =
299 e1000_phy_force_speed_duplex_82577;
300 phy->ops.get_cable_length = e1000_get_cable_length_82577;
301 phy->ops.get_info = e1000_get_phy_info_82577;
302 phy->ops.commit = e1000_phy_sw_reset_generic;
304 case e1000_phy_82578:
305 phy->ops.check_polarity = e1000_check_polarity_m88;
306 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
307 phy->ops.get_cable_length = e1000_get_cable_length_m88;
308 phy->ops.get_info = e1000_get_phy_info_m88;
/* Unrecognized PHY type: fail initialization. */
311 ret_val = -E1000_ERR_PHY;
320 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
321 * @hw: pointer to the HW structure
323 * Initialize family-specific PHY parameters and function pointers.
325 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
327 struct e1000_phy_info *phy = &hw->phy;
328 s32 ret_val = E1000_SUCCESS;
331 DEBUGFUNC("e1000_init_phy_params_ich8lan");
334 phy->reset_delay_us = 100;
/* Default to IGP-style register access; may be swapped to BM below
 * if address probing with the IGP routines fails.
 */
336 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
337 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
338 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
339 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
340 phy->ops.read_reg = e1000_read_phy_reg_igp;
341 phy->ops.release = e1000_release_swflag_ich8lan;
342 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
343 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
344 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
345 phy->ops.write_reg = e1000_write_phy_reg_igp;
346 phy->ops.power_up = e1000_power_up_phy_copper;
347 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
350 * We may need to do this twice - once for IGP and if that fails,
351 * we'll set BM func pointers and try again
353 ret_val = e1000_determine_phy_address(hw);
355 phy->ops.write_reg = e1000_write_phy_reg_bm;
356 phy->ops.read_reg = e1000_read_phy_reg_bm;
357 ret_val = e1000_determine_phy_address(hw);
359 DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
/* Retry the PHY id read until a known PHY type is recognized. */
365 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
368 ret_val = e1000_get_phy_id(hw);
/* Bind type-specific parameters and ops based on the PHY id. */
375 case IGP03E1000_E_PHY_ID:
376 phy->type = e1000_phy_igp_3;
377 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
378 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
379 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
380 phy->ops.get_info = e1000_get_phy_info_igp;
381 phy->ops.check_polarity = e1000_check_polarity_igp;
382 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
385 case IFE_PLUS_E_PHY_ID:
/* IFE is a 10/100-only PHY: do not advertise gigabit. */
387 phy->type = e1000_phy_ife;
388 phy->autoneg_mask = E1000_ALL_NOT_GIG;
389 phy->ops.get_info = e1000_get_phy_info_ife;
390 phy->ops.check_polarity = e1000_check_polarity_ife;
391 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
393 case BME1000_E_PHY_ID:
394 phy->type = e1000_phy_bm;
395 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
396 phy->ops.read_reg = e1000_read_phy_reg_bm;
397 phy->ops.write_reg = e1000_write_phy_reg_bm;
398 phy->ops.commit = e1000_phy_sw_reset_generic;
399 phy->ops.get_info = e1000_get_phy_info_m88;
400 phy->ops.check_polarity = e1000_check_polarity_m88;
401 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
/* Unrecognized PHY id: fail initialization. */
404 ret_val = -E1000_ERR_PHY;
413 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
414 * @hw: pointer to the HW structure
416 * Initialize family-specific NVM parameters and function
419 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
421 struct e1000_nvm_info *nvm = &hw->nvm;
422 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
423 u32 gfpreg, sector_base_addr, sector_end_addr;
424 s32 ret_val = E1000_SUCCESS;
427 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
429 /* Can't read flash registers if the register set isn't mapped. */
430 if (!hw->flash_address) {
431 DEBUGOUT("ERROR: Flash registers not mapped\n");
432 ret_val = -E1000_ERR_CONFIG;
436 nvm->type = e1000_nvm_flash_sw;
/* GFPREG holds the base/limit sector numbers of the GbE flash region. */
438 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
441 * sector_X_addr is a "sector"-aligned address (4096 bytes)
442 * Add 1 to sector_end_addr since this sector is included in
445 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
446 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
448 /* flash_base_addr is byte-aligned */
449 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
452 * find total size of the NVM, then cut in half since the total
453 * size represents two separate NVM banks.
455 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
456 << FLASH_SECTOR_ADDR_SHIFT;
457 nvm->flash_bank_size /= 2;
458 /* Adjust to word count */
459 nvm->flash_bank_size /= sizeof(u16);
461 nvm->word_size = E1000_SHADOW_RAM_WORDS;
463 /* Clear shadow ram */
464 for (i = 0; i < nvm->word_size; i++) {
465 dev_spec->shadow_ram[i].modified = FALSE;
466 dev_spec->shadow_ram[i].value = 0xFFFF;
/* Mutexes serialize NVM access and software-flag acquisition
 * (see e1000_acquire_nvm_ich8lan / e1000_acquire_swflag_ich8lan).
 */
469 E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
470 E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
472 /* Function Pointers */
473 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
474 nvm->ops.release = e1000_release_nvm_ich8lan;
475 nvm->ops.read = e1000_read_nvm_ich8lan;
476 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
477 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
478 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
479 nvm->ops.write = e1000_write_nvm_ich8lan;
486 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
487 * @hw: pointer to the HW structure
489 * Initialize family-specific MAC parameters and function
492 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
494 struct e1000_mac_info *mac = &hw->mac;
496 DEBUGFUNC("e1000_init_mac_params_ich8lan");
498 /* Set media type function pointer */
499 hw->phy.media_type = e1000_media_type_copper;
501 /* Set mta register count */
502 mac->mta_reg_count = 32;
503 /* Set rar entry count */
504 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
505 if (mac->type == e1000_ich8lan)
506 mac->rar_entry_count--;
507 /* Set if part includes ASF firmware */
508 mac->asf_firmware_present = TRUE;
510 mac->has_fwsm = TRUE;
511 /* ARC subsystem not supported */
512 mac->arc_subsystem_valid = FALSE;
513 /* Adaptive IFS supported */
514 mac->adaptive_ifs = TRUE;
516 /* Function pointers */
518 /* bus type/speed/width */
519 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
521 mac->ops.set_lan_id = e1000_set_lan_id_single_port;
523 mac->ops.reset_hw = e1000_reset_hw_ich8lan;
524 /* hw initialization */
525 mac->ops.init_hw = e1000_init_hw_ich8lan;
527 mac->ops.setup_link = e1000_setup_link_ich8lan;
528 /* physical interface setup */
529 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
531 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
533 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
534 /* multicast address update */
535 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
536 /* clear hardware counters */
537 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
/* ICH-family (non-PCH) defaults for manageability and LED handling. */
544 /* check management mode */
545 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
547 mac->ops.id_led_init = e1000_id_led_init_generic;
549 mac->ops.blink_led = e1000_blink_led_generic;
551 mac->ops.setup_led = e1000_setup_led_generic;
553 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
554 /* turn on/off LED */
555 mac->ops.led_on = e1000_led_on_ich8lan;
556 mac->ops.led_off = e1000_led_off_ich8lan;
/* PCH2 (82579) overrides: SHRA-backed receive addresses and a
 * multicast update that mirrors the MTA into the PHY (see
 * e1000_rar_set_pch2lan / e1000_update_mc_addr_list_pch2lan).
 */
559 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
560 mac->ops.rar_set = e1000_rar_set_pch2lan;
561 /* multicast address update for pch2 */
562 mac->ops.update_mc_addr_list =
563 e1000_update_mc_addr_list_pch2lan;
/* PCH-family overrides for manageability and LED handling. */
566 /* check management mode */
567 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
569 mac->ops.id_led_init = e1000_id_led_init_pchlan;
571 mac->ops.setup_led = e1000_setup_led_pchlan;
573 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
574 /* turn on/off LED */
575 mac->ops.led_on = e1000_led_on_pchlan;
576 mac->ops.led_off = e1000_led_off_pchlan;
582 #if defined(NAHUM6_HW) && (defined(LTR_SUPPORT) || defined(OBFF_SUPPORT))
583 if (mac->type == e1000_pch_lpt) {
586 #endif /* NAHUM6_HW && (LTR_SUPPORT || OBFF_SUPPORT) */
587 /* Enable PCS Lock-loss workaround for ICH8 */
588 if (mac->type == e1000_ich8lan)
589 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
591 /* Gate automatic PHY configuration by hardware on managed 82579 */
592 if ((mac->type == e1000_pch2lan) &&
593 (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
594 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
596 return E1000_SUCCESS;
600 * e1000_set_eee_pchlan - Enable/disable EEE support
601 * @hw: pointer to the HW structure
603 * Enable/disable EEE based on setting in dev_spec structure. The bits in
604 * the LPI Control register will remain set only if/when link is up.
606 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
608 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
609 s32 ret_val = E1000_SUCCESS;
612 DEBUGFUNC("e1000_set_eee_pchlan");
/* EEE is only handled here for the 82579 PHY; other PHYs bail out. */
614 if (hw->phy.type != e1000_phy_82579)
617 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
/* Read-modify-write the LPI Control enable bits per dev_spec policy. */
621 if (dev_spec->eee_disable)
622 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
624 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
626 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
632 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
633 * @hw: pointer to the HW structure
635 * Checks to see of the link status of the hardware has changed. If a
636 * change in link status has been detected, then we read the PHY registers
637 * to get the current speed/duplex if link exists.
639 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
641 struct e1000_mac_info *mac = &hw->mac;
646 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
649 * We only want to go out to the PHY registers to see if Auto-Neg
650 * has completed and/or if our link status has changed. The
651 * get_link_status flag is set upon receiving a Link Status
652 * Change or Rx Sequence Error interrupt.
654 if (!mac->get_link_status) {
655 ret_val = E1000_SUCCESS;
660 * First we want to see if the MII Status Register reports
661 * link. If so, then we want to get the current speed/duplex
664 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
/* PCH (82577/82578) needs the K1 gigabit workaround on every
 * link-state evaluation.
 */
668 if (hw->mac.type == e1000_pchlan) {
669 ret_val = e1000_k1_gig_workaround_hv(hw, link);
674 #if defined(NAHUM6_HW) && (defined(LTR_SUPPORT) || defined(OBFF_SUPPORT))
675 if (hw->mac.type == e1000_pch_lpt) {
678 #endif /* NAHUM6_HW && (LTR_SUPPORT || OBFF_SUPPORT) */
680 goto out; /* No link detected */
682 mac->get_link_status = FALSE;
/* Apply MAC-type / PHY-type specific link-up workarounds. */
684 switch (hw->mac.type) {
686 ret_val = e1000_k1_workaround_lv(hw);
691 if (hw->phy.type == e1000_phy_82578) {
692 ret_val = e1000_link_stall_workaround_hv(hw);
698 * Workaround for PCHx parts in half-duplex:
699 * Set the number of preambles removed from the packet
700 * when it is passed from the PHY to the MAC to prevent
701 * the MAC from misinterpreting the packet type.
703 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
704 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
706 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
708 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
710 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
717 * Check if there was DownShift, must be checked
718 * immediately after link-up
720 e1000_check_downshift_generic(hw);
722 /* Enable/Disable EEE after link up */
723 ret_val = e1000_set_eee_pchlan(hw);
728 * If we are forcing speed/duplex, then we simply return since
729 * we have already determined whether we have link or not.
732 ret_val = -E1000_ERR_CONFIG;
737 * Auto-Neg is enabled. Auto Speed Detection takes care
738 * of MAC speed/duplex configuration. So we only need to
739 * configure Collision Distance in the MAC.
741 hw->mac.ops.config_collision_dist(hw);
744 * Configure Flow Control now that Auto-Neg has completed.
745 * First, we need to restore the desired flow control
746 * settings because we may have had to re-autoneg with a
747 * different link partner.
749 ret_val = e1000_config_fc_after_link_up_generic(hw);
751 DEBUGOUT("Error configuring flow control\n");
758 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
759 * @hw: pointer to the HW structure
761 * Initialize family-specific function pointers for PHY, MAC, and NVM.
763 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
765 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
767 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
768 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
/* PHY init differs by MAC generation: ICH parts use the ich8lan
 * variant, PCH parts use the pchlan variant.
 */
769 switch (hw->mac.type) {
773 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
777 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
785 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
786 * @hw: pointer to the HW structure
788 * Acquires the mutex for performing NVM operations.
790 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
792 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
/* Paired with e1000_release_nvm_ich8lan; always succeeds. */
794 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
796 return E1000_SUCCESS;
800 * e1000_release_nvm_ich8lan - Release NVM mutex
801 * @hw: pointer to the HW structure
803 * Releases the mutex used while performing NVM operations.
805 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
807 DEBUGFUNC("e1000_release_nvm_ich8lan");
/* Paired with e1000_acquire_nvm_ich8lan. */
809 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
815 * e1000_acquire_swflag_ich8lan - Acquire software control flag
816 * @hw: pointer to the HW structure
818 * Acquires the software control flag for performing PHY and select
821 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
823 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
824 s32 ret_val = E1000_SUCCESS;
826 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
/* The mutex serializes software contexts; the EXTCNF_CTRL SWFLAG bit
 * arbitrates against firmware/hardware.
 */
828 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
/* First wait for any existing SW owner to drop the flag. */
831 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
832 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
840 DEBUGOUT("SW has already locked the resource.\n");
841 ret_val = -E1000_ERR_CONFIG;
/* Then request the flag and poll until hardware latches it. */
845 timeout = SW_FLAG_TIMEOUT;
847 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
848 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
851 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
852 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
/* Timed out: FW/HW still owns the semaphore — undo our request. */
860 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
861 E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
862 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
863 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
864 ret_val = -E1000_ERR_CONFIG;
/* On failure the mutex is dropped so callers never deadlock. */
870 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
876 * e1000_release_swflag_ich8lan - Release software control flag
877 * @hw: pointer to the HW structure
879 * Releases the software control flag for performing PHY and select
882 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
886 DEBUGFUNC("e1000_release_swflag_ich8lan");
888 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
/* Only clear SWFLAG if we still hold it; otherwise warn — something
 * else released our semaphore out from under us.
 */
890 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
891 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
892 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
894 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
897 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
903 * e1000_check_mng_mode_ich8lan - Checks management mode
904 * @hw: pointer to the HW structure
906 * This checks if the adapter has any manageability enabled.
907 * This is a function pointer entry point only called by read/write
908 * routines for the PHY and NVM parts.
910 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
914 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
916 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* Manageable iff firmware is valid AND the FWSM mode field equals
 * the iAMT mode value.
 */
918 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
919 ((fwsm & E1000_FWSM_MODE_MASK) ==
920 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
924 * e1000_check_mng_mode_pchlan - Checks management mode
925 * @hw: pointer to the HW structure
927 * This checks if the adapter has iAMT enabled.
928 * This is a function pointer entry point only called by read/write
929 * routines for the PHY and NVM parts.
931 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
935 DEBUGFUNC("e1000_check_mng_mode_pchlan");
937 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* Unlike the ICH variant above, PCH only tests that the iAMT mode
 * bit is set (bitwise AND), not an exact mode-field match.
 */
939 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
940 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
944 * e1000_rar_set_pch2lan - Set receive address register
945 * @hw: pointer to the HW structure
946 * @addr: pointer to the receive address
947 * @index: receive address array register
949 * Sets the receive address array register at index to the address passed
950 * in by addr. For 82579, RAR[0] is the base address register that is to
951 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
952 * Use SHRA[0-3] in place of those reserved for ME.
954 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
956 u32 rar_low, rar_high;
958 DEBUGFUNC("e1000_rar_set_pch2lan");
961 * HW expects these in little endian so we reverse the byte order
962 * from network order (big endian) to little endian
964 rar_low = ((u32) addr[0] |
965 ((u32) addr[1] << 8) |
966 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
968 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
970 /* If MAC address zero, no need to set the AV bit */
971 if (rar_low || rar_high)
972 rar_high |= E1000_RAH_AV;
/* Base RAR write path; low then high, each flushed. */
975 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
976 E1000_WRITE_FLUSH(hw);
977 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
978 E1000_WRITE_FLUSH(hw);
/* Non-zero indices use SHRA[index-1] in place of ME-reserved RARs
 * (see header comment above).
 */
982 if (index < hw->mac.rar_entry_count) {
983 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
984 E1000_WRITE_FLUSH(hw);
985 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
986 E1000_WRITE_FLUSH(hw);
988 /* verify the register updates */
989 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
990 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
/* Read-back mismatch: ME may have the SHRA entry locked. */
993 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
994 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
997 DEBUGOUT1("Failed to write receive address at index %d\n", index);
1001 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1002 * @hw: pointer to the HW structure
1003 * @mc_addr_list: array of multicast addresses to program
1004 * @mc_addr_count: number of multicast addresses to program
1006 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1007 * The caller must have a packed mc_addr_list of multicast addresses.
1009 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1017 DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
/* Program the MAC's MTA first, then mirror the shadow MTA into the
 * PHY's wakeup registers under the acquired semaphore.
 */
1019 e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1021 ret_val = hw->phy.ops.acquire(hw);
1025 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
/* Each 32-bit MTA entry is written as two 16-bit PHY registers. */
1029 for (i = 0; i < hw->mac.mta_reg_count; i++) {
1030 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1031 (u16)(hw->mac.mta_shadow[i] &
1033 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1034 (u16)((hw->mac.mta_shadow[i] >> 16) &
1038 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1041 hw->phy.ops.release(hw);
1045 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1046 * @hw: pointer to the HW structure
1048 * Checks if firmware is blocking the reset of the PHY.
1049 * This is a function pointer entry point only called by
1052 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1056 DEBUGFUNC("e1000_check_reset_block_ich8lan");
1058 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* RSPCIPHY set means firmware permits a PHY reset. */
1060 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1061 : E1000_BLK_PHY_RESET;
1065 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1066 * @hw: pointer to the HW structure
1068 * Assumes semaphore already acquired.
1071 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1074 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1075 s32 ret_val = E1000_SUCCESS;
/* Copy the strapped SMBus address from E1000_STRAP into the PHY's
 * HV_SMB_ADDR register, enabling PEC and marking the address valid.
 */
1077 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1079 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1083 phy_data &= ~HV_SMB_ADDR_MASK;
1084 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1085 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1086 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1093 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1094 * @hw: pointer to the HW structure
1096 * SW should configure the LCD from the NVM extended configuration region
1097 * as a workaround for certain parts.
1099 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1101 	struct e1000_phy_info *phy = &hw->phy;
1102 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1103 	s32 ret_val = E1000_SUCCESS;
1104 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
1106 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1109 	 * Initialize the PHY from the NVM on ICH platforms.  This
1110 	 * is needed due to an issue where the NVM configuration is
1111 	 * not properly autoloaded after power transitions.
1112 	 * Therefore, after each PHY reset, we will load the
1113 	 * configuration data out of the NVM manually.
/* Select the SW-config gate bit: AMT/C ICH8 devices use the plain
 * FEXTNVM bit, everything else the ICH8M variant.
 */
1115 	switch (hw->mac.type) {
1117 		if (phy->type != e1000_phy_igp_3)
1120 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1121 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1122 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1128 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1134 	ret_val = hw->phy.ops.acquire(hw);
/* If the NVM does not request software configuration, nothing to do. */
1138 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
1139 	if (!(data & sw_cfg_mask))
1143 	 * Make sure HW does not configure LCD from PHY
1144 	 * extended configuration before SW configuration
1146 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1147 	if (!(hw->mac.type == e1000_pch2lan)) {
1148 		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
/* Extract the extended-config area size (in words) and its base pointer
 * (in dwords) from the EXTCNF registers.
 */
1152 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1153 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1154 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1158 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1159 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1161 	if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
1162 	    (hw->mac.type == e1000_pchlan)) ||
1163 	    (hw->mac.type == e1000_pch2lan)) {
1165 		 * HW configures the SMBus address and LEDs when the
1166 		 * OEM and LCD Write Enable bits are set in the NVM.
1167 		 * When both NVM bits are cleared, SW will configure
1170 		ret_val = e1000_write_smbus_addr(hw);
/* Mirror the MAC LEDCTL value into the PHY LED configuration. */
1174 		data = E1000_READ_REG(hw, E1000_LEDCTL);
1175 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1181 	/* Configure LCD from extended configuration region. */
1183 	/* cnf_base_addr is in DWORD */
1184 	word_addr = (u16)(cnf_base_addr << 1);
/* Each extended-config entry is a (data, address) word pair in NVM. */
1186 	for (i = 0; i < cnf_size; i++) {
1187 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1192 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1197 		/* Save off the PHY page for future writes. */
1198 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1199 			phy_page = reg_data;
/* Combine the saved page with the in-page register offset. */
1203 		reg_addr &= PHY_REG_MASK;
1204 		reg_addr |= phy_page;
1206 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1213 	hw->phy.ops.release(hw);
1218 * e1000_k1_gig_workaround_hv - K1 Si workaround
1219 * @hw: pointer to the HW structure
1220 * @link: link up bool flag
1222 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1223 * from a lower speed.  This workaround disables K1 whenever link is at 1Gig
1224 * If link is down, the function will restore the default K1 setting located
1227 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1229 	s32 ret_val = E1000_SUCCESS;
1231 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1233 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
/* Workaround applies only to the first-generation PCH (82577/82578). */
1235 	if (hw->mac.type != e1000_pchlan)
1238 	/* Wrap the whole flow with the sw flag */
1239 	ret_val = hw->phy.ops.acquire(hw);
1243 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
/* 82578: resolved 1Gbps is detected via BM_CS_STATUS bits. */
1245 		if (hw->phy.type == e1000_phy_82578) {
1246 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1251 			status_reg &= BM_CS_STATUS_LINK_UP |
1252 				      BM_CS_STATUS_RESOLVED |
1253 				      BM_CS_STATUS_SPEED_MASK;
1255 			if (status_reg == (BM_CS_STATUS_LINK_UP |
1256 					   BM_CS_STATUS_RESOLVED |
1257 					   BM_CS_STATUS_SPEED_1000))
/* 82577: equivalent check uses the HV_M_STATUS register instead. */
1261 		if (hw->phy.type == e1000_phy_82577) {
1262 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1267 			status_reg &= HV_M_STATUS_LINK_UP |
1268 				      HV_M_STATUS_AUTONEG_COMPLETE |
1269 				      HV_M_STATUS_SPEED_MASK;
1271 			if (status_reg == (HV_M_STATUS_LINK_UP |
1272 					   HV_M_STATUS_AUTONEG_COMPLETE |
1273 					   HV_M_STATUS_SPEED_1000))
1277 		/* Link stall fix for link up */
1278 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1284 		/* Link stall fix for link down */
1285 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
/* Finally program K1 according to the (possibly overridden) enable flag. */
1291 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1294 	hw->phy.ops.release(hw);
1300 * e1000_configure_k1_ich8lan - Configure K1 power state
1301 * @hw: pointer to the HW structure
1302 * @enable: K1 state to configure
1304 * Configure the K1 power state based on the provided parameter.
1305 * Assumes semaphore already acquired.
1307 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1309 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1311 	s32 ret_val = E1000_SUCCESS;
1317 	DEBUGFUNC("e1000_configure_k1_ich8lan");
/* Flip the K1 enable bit in the KMRN K1 config register. */
1319 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1325 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1327 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1329 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
/* Briefly force speed with SPD_BYPS so the K1 change takes effect, then
 * restore the original CTRL/CTRL_EXT values. Flushes order the writes.
 */
1335 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1336 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1338 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1339 	reg |= E1000_CTRL_FRCSPD;
1340 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
1342 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1343 	E1000_WRITE_FLUSH(hw);
1345 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1346 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1347 	E1000_WRITE_FLUSH(hw);
1355 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1356 * @hw: pointer to the HW structure
1357 * @d0_state: boolean if entering d0 or d3 device state
1359 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1360 * collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
1361 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1363 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1369 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
/* Only PCH and PCH2 parts carry the OEM bits in the PHY. */
1371 	if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1374 	ret_val = hw->phy.ops.acquire(hw);
/* On pre-PCH2 parts, skip SW config when HW owns the OEM bits
 * (OEM write-enable set) or the NVM did not request SW config.
 */
1378 	if (!(hw->mac.type == e1000_pch2lan)) {
1379 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1380 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1384 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1385 		if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1388 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1390 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1394 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
/* D0 path: mirror the D0 GbE-disable/LPLU bits from PHY_CTRL. */
1397 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1398 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1400 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1401 			oem_reg |= HV_OEM_BITS_LPLU;
1403 		/* Set Restart auto-neg to activate the bits */
1404 		if (!hw->phy.ops.check_reset_block(hw))
1405 			oem_reg |= HV_OEM_BITS_RESTART_AN;
/* Dx path: either the D0 or the non-D0 variant of each bit applies. */
1407 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1408 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1409 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1411 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1412 		    E1000_PHY_CTRL_NOND0A_LPLU))
1413 			oem_reg |= HV_OEM_BITS_LPLU;
1416 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1419 	hw->phy.ops.release(hw);
1426 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1427 * @hw: pointer to the HW structure
/* Read-modify-write of HV_KMRN_MODE_CTRL: sets the MDIO-slow bit so
 * subsequent MDIO accesses to the PHY use the slower, reliable timing.
 */
1429 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1434 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
1436 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1440 	data |= HV_KMRN_MDIO_SLOW;
1442 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1448 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1449 * done after every PHY reset.
1451 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1453 	s32 ret_val = E1000_SUCCESS;
1456 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
/* Applies only to first-generation PCH (82577/82578 PHYs). */
1458 	if (hw->mac.type != e1000_pchlan)
1461 	/* Set MDIO slow mode before any other MDIO access */
1462 	if (hw->phy.type == e1000_phy_82577) {
1463 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
/* Early-revision silicon: disable early preamble and tune the KMRN
 * FIFO for SSC; register values are silicon-specific magic.
 */
1468 	if (((hw->phy.type == e1000_phy_82577) &&
1469 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1470 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1471 		/* Disable generation of early preamble */
1472 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1476 		/* Preamble tuning for SSC */
1477 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
1483 	if (hw->phy.type == e1000_phy_82578) {
1485 		 * Return registers to default by doing a soft reset then
1486 		 * writing 0x3140 to the control register.
1488 		if (hw->phy.revision < 2) {
1489 			e1000_phy_sw_reset_generic(hw);
1490 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
/* Reset the page select register to page 0 via raw MDIC access. */
1496 	ret_val = hw->phy.ops.acquire(hw);
1501 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1502 	hw->phy.ops.release(hw);
1507 	 * Configure the K1 Si workaround during phy reset assuming there is
1508 	 * link so that it disables K1 if link is in 1Gbps.
1510 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
1514 	/* Workaround for link disconnects on a busy hub in half duplex */
1515 	ret_val = hw->phy.ops.acquire(hw);
1518 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1521 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
1524 	hw->phy.ops.release(hw);
1530 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1531 * @hw: pointer to the HW structure
/* Mirrors all receive address registers (RAL/RAH plus the four shared
 * SHRAL/SHRAH entries) into the PHY's BM_RAR wakeup registers as 16-bit
 * halves, including the address-valid flag, so the PHY can match unicast
 * addresses on its own. Takes the PHY semaphore and BM wakeup access.
 */
1533 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1539 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1541 	ret_val = hw->phy.ops.acquire(hw);
1544 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1548 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1549 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1550 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1551 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1552 					   (u16)(mac_reg & 0xFFFF));
1553 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1554 					   (u16)((mac_reg >> 16) & 0xFFFF));
1556 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1557 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1558 					   (u16)(mac_reg & 0xFFFF));
/* The RAH address-valid bit is carried over via BM_RAR_CTRL. */
1559 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1560 					   (u16)((mac_reg & E1000_RAH_AV)
1564 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1567 	hw->phy.ops.release(hw);
1570 static u32 e1000_calc_rx_da_crc(u8 mac[])
1572 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
1573 u32 i, j, mask, crc;
1575 DEBUGFUNC("e1000_calc_rx_da_crc");
1578 for (i = 0; i < 6; i++) {
1580 for (j = 8; j > 0; j--) {
1581 mask = (crc & 1) * (-1);
1582 crc = (crc >> 1) ^ (poly & mask);
1589 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1591 * @hw: pointer to the HW structure
1592 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
1594 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1596 	s32 ret_val = E1000_SUCCESS;
1601 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
/* 82579 (PCH2) only. */
1603 	if (hw->mac.type != e1000_pch2lan)
1606 	/* disable Rx path while enabling/disabling workaround */
1607 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1608 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
1609 				        phy_reg | (1 << 14));
1615 		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1616 		 * SHRAL/H) and initial CRC values to the MAC
1618 		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1619 			u8 mac_addr[ETH_ADDR_LEN] = {0};
1620 			u32 addr_high, addr_low;
/* Skip entries whose address-valid bit is clear. */
1622 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
1623 			if (!(addr_high & E1000_RAH_AV))
1625 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
/* Unpack RAL/RAH into the 6 MAC-address bytes, little-endian order. */
1626 			mac_addr[0] = (addr_low & 0xFF);
1627 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
1628 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
1629 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
1630 			mac_addr[4] = (addr_high & 0xFF);
1631 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
1633 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1634 					e1000_calc_rx_da_crc(mac_addr));
1637 		/* Write Rx addresses to the PHY */
1638 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1640 		/* Enable jumbo frame workaround in the MAC */
1641 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1642 		mac_reg &= ~(1 << 14);
1643 		mac_reg |= (7 << 15);
1644 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
/* Have the MAC strip the Ethernet CRC on receive while jumbos are on. */
1646 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1647 		mac_reg |= E1000_RCTL_SECRC;
1648 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1650 		ret_val = e1000_read_kmrn_reg_generic(hw,
1651 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1655 		ret_val = e1000_write_kmrn_reg_generic(hw,
1656 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1660 		ret_val = e1000_read_kmrn_reg_generic(hw,
1661 						E1000_KMRNCTRLSTA_HD_CTRL,
1665 		data &= ~(0xF << 8);
1667 		ret_val = e1000_write_kmrn_reg_generic(hw,
1668 						E1000_KMRNCTRLSTA_HD_CTRL,
1673 		/* Enable jumbo frame workaround in the PHY */
/* The PHY register/value pairs below are silicon-specific tuning. */
1674 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1675 		data &= ~(0x7F << 5);
1676 		data |= (0x37 << 5);
1677 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1680 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1682 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1685 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1686 		data &= ~(0x3FF << 2);
1687 		data |= (0x1A << 2);
1688 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1691 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
1694 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1695 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
/* Disable path: undo every MAC/KMRN/PHY change made above. */
1700 		/* Write MAC register values back to h/w defaults */
1701 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1702 		mac_reg &= ~(0xF << 14);
1703 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1705 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1706 		mac_reg &= ~E1000_RCTL_SECRC;
1707 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1709 		ret_val = e1000_read_kmrn_reg_generic(hw,
1710 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1714 		ret_val = e1000_write_kmrn_reg_generic(hw,
1715 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1719 		ret_val = e1000_read_kmrn_reg_generic(hw,
1720 						E1000_KMRNCTRLSTA_HD_CTRL,
1724 		data &= ~(0xF << 8);
1726 		ret_val = e1000_write_kmrn_reg_generic(hw,
1727 						E1000_KMRNCTRLSTA_HD_CTRL,
1732 		/* Write PHY register values back to h/w defaults */
1733 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1734 		data &= ~(0x7F << 5);
1735 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1738 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1740 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1743 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1744 		data &= ~(0x3FF << 2);
1746 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1749 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1752 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1753 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
1759 	/* re-enable Rx path after enabling/disabling workaround */
1760 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
1768 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1769 * done after every PHY reset.
1771 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1773 	s32 ret_val = E1000_SUCCESS;
1775 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
/* 82579 (PCH2) only. */
1777 	if (hw->mac.type != e1000_pch2lan)
1780 	/* Set MDIO slow mode before any other MDIO access */
1781 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
1783 	ret_val = hw->phy.ops.acquire(hw);
/* EMI writes are address/data pairs: select the EMI register, then
 * write its value.  First the MSE threshold, then link-down count.
 */
1786 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1787 					       I82579_MSE_THRESHOLD);
1790 	/* set MSE higher to enable link to stay up when noise is high */
1791 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1795 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1796 					       I82579_MSE_LINK_DOWN);
1799 	/* drop link after 5 times MSE threshold was reached */
1800 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1803 	hw->phy.ops.release(hw);
1810 * e1000_k1_gig_workaround_lv - K1 Si workaround
1811 * @hw: pointer to the HW structure
1813 * Workaround to set the K1 beacon duration for 82579 parts
1815 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1817 	s32 ret_val = E1000_SUCCESS;
1822 	DEBUGFUNC("e1000_k1_workaround_lv");
1824 	if (hw->mac.type != e1000_pch2lan)
1827 	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
1828 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
/* Only act once link is up and autoneg has completed. */
1832 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1833 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1834 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1835 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1837 		ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
/* 1Gbps: 8us beacon without forced PLL lock count; slower speeds:
 * 16us beacon with the forced PLL lock count enabled.
 */
1841 		if (status_reg & HV_M_STATUS_SPEED_1000) {
1842 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1843 			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1845 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1846 			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1848 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1849 		ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
1857 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1858 * @hw: pointer to the HW structure
1859 * @gate: boolean set to TRUE to gate, FALSE to ungate
1861 * Gate/ungate the automatic PHY configuration via hardware; perform
1862 * the configuration via software instead.
1864 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1868 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
/* Only the 82579 (PCH2) exposes the gate bit. */
1870 	if (hw->mac.type != e1000_pch2lan)
/* Read-modify-write the GATE_PHY_CFG bit in EXTCNF_CTRL. */
1873 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1876 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1878 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1880 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1885 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1886 * @hw: pointer to the HW structure
1888 * Check the appropriate indication the MAC has finished configuring the
1889 * PHY after a software reset.
1891 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1893 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1895 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
1897 	/* Wait for basic configuration completes before proceeding */
/* Poll STATUS.LAN_INIT_DONE until set or the loop counter expires. */
1899 		data = E1000_READ_REG(hw, E1000_STATUS);
1900 		data &= E1000_STATUS_LAN_INIT_DONE;
1902 	} while ((!data) && --loop);
1905 	 * If basic configuration is incomplete before the above loop
1906 	 * count reaches 0, loading the configuration from NVM will
1907 	 * leave the PHY in a bad state possibly resulting in no link.
1910 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1912 	/* Clear the Init Done bit for the next init event */
1913 	data = E1000_READ_REG(hw, E1000_STATUS);
1914 	data &= ~E1000_STATUS_LAN_INIT_DONE;
1915 	E1000_WRITE_REG(hw, E1000_STATUS, data);
1919 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1920 * @hw: pointer to the HW structure
1922 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1924 	s32 ret_val = E1000_SUCCESS;
1927 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
/* Nothing to do while firmware is blocking PHY resets. */
1929 	if (hw->phy.ops.check_reset_block(hw))
1932 	/* Allow time for h/w to get to quiescent state after reset */
1935 	/* Perform any necessary post-reset workarounds */
1936 	switch (hw->mac.type) {
1938 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1943 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1951 	/* Clear the host wakeup bit after lcd reset */
1952 	if (hw->mac.type >= e1000_pchlan) {
/* NOTE(review): "&reg" had been mis-encoded as the single character
 * "(R)" (HTML entity residue); restored to take the address of reg.
 */
1953 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
1954 		reg &= ~BM_WUC_HOST_WU_BIT;
1955 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
1958 	/* Configure the LCD with the extended configuration region in NVM */
1959 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
1963 	/* Configure the LCD with the OEM bits in NVM */
1964 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
1966 	if (hw->mac.type == e1000_pch2lan) {
1967 		/* Ungate automatic PHY configuration on non-managed 82579 */
1968 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
1969 		    E1000_ICH_FWSM_FW_VALID)) {
1971 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
1974 		/* Set EEE LPI Update Timer to 200usec */
1975 		ret_val = hw->phy.ops.acquire(hw);
1978 		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1979 						       I82579_LPI_UPDATE_TIMER);
1982 		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1985 		hw->phy.ops.release(hw);
1993 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1994 * @hw: pointer to the HW structure
1997 * This is a function pointer entry point called by drivers
1998 * or other shared routines.
2000 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2002 	s32 ret_val = E1000_SUCCESS;
2004 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2006 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
2007 	if ((hw->mac.type == e1000_pch2lan) &&
2008 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2009 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
/* Generic hardware reset followed by the ICH/PCH post-reset sequence
 * (workarounds, LCD and OEM-bit configuration).
 */
2011 	ret_val = e1000_phy_hw_reset_generic(hw);
2015 	ret_val = e1000_post_phy_reset_ich8lan(hw);
2022 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2023 * @hw: pointer to the HW structure
2024 * @active: TRUE to enable LPLU, FALSE to disable
2026 * Sets the LPLU state according to the active flag.  For PCH, if OEM write
2027 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2028 * the phy speed. This function will manually set the LPLU bit and restart
2029 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
2030 * since it configures the same bit.
2032 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2034 	s32 ret_val = E1000_SUCCESS;
2037 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
/* Toggle the LPLU OEM bit directly in the PHY. */
2039 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2044 		oem_reg |= HV_OEM_BITS_LPLU;
2046 		oem_reg &= ~HV_OEM_BITS_LPLU;
/* Restart autoneg to apply the change, unless firmware blocks resets. */
2048 	if (!hw->phy.ops.check_reset_block(hw))
2049 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2051 	ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2058 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2059 * @hw: pointer to the HW structure
2060 * @active: TRUE to enable LPLU, FALSE to disable
2062 * Sets the LPLU D0 state according to the active flag.  When
2063 * activating LPLU this function also disables smart speed
2064 * and vice versa.  LPLU will not be activated unless the
2065 * device autonegotiation advertisement meets standards of
2066 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2067 * This is a function pointer entry point only called by
2068 * PHY setup routines.
2070 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2072 	struct e1000_phy_info *phy = &hw->phy;
2074 	s32 ret_val = E1000_SUCCESS;
2077 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
/* The IFE PHY does not support D0 LPLU via this path. */
2079 	if (phy->type == e1000_phy_ife)
2082 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* Activate: set the D0 LPLU bit and disable SmartSpeed (the two are
 * mutually exclusive on IGP3).
 */
2085 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2086 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2088 		if (phy->type != e1000_phy_igp_3)
2092 		 * Call gig speed drop workaround on LPLU before accessing
2095 		if (hw->mac.type == e1000_ich8lan)
2096 			e1000_gig_downshift_workaround_ich8lan(hw);
2098 		/* When LPLU is enabled, we should disable SmartSpeed */
2099 		ret_val = phy->ops.read_reg(hw,
2100 					    IGP01E1000_PHY_PORT_CONFIG,
2102 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2103 		ret_val = phy->ops.write_reg(hw,
2104 					     IGP01E1000_PHY_PORT_CONFIG,
/* Deactivate: clear D0 LPLU, then restore SmartSpeed per policy. */
2109 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2110 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2112 		if (phy->type != e1000_phy_igp_3)
2116 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2117 		 * during Dx states where the power conservation is most
2118 		 * important.  During driver activity we should enable
2119 		 * SmartSpeed, so performance is maintained.
2121 		if (phy->smart_speed == e1000_smart_speed_on) {
2122 			ret_val = phy->ops.read_reg(hw,
2123 						    IGP01E1000_PHY_PORT_CONFIG,
2128 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2129 			ret_val = phy->ops.write_reg(hw,
2130 						     IGP01E1000_PHY_PORT_CONFIG,
2134 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2135 			ret_val = phy->ops.read_reg(hw,
2136 						    IGP01E1000_PHY_PORT_CONFIG,
2141 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2142 			ret_val = phy->ops.write_reg(hw,
2143 						     IGP01E1000_PHY_PORT_CONFIG,
2155 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2156 * @hw: pointer to the HW structure
2157 * @active: TRUE to enable LPLU, FALSE to disable
2159 * Sets the LPLU D3 state according to the active flag.  When
2160 * activating LPLU this function also disables smart speed
2161 * and vice versa.  LPLU will not be activated unless the
2162 * device autonegotiation advertisement meets standards of
2163 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2164 * This is a function pointer entry point only called by
2165 * PHY setup routines.
2167 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2169 	struct e1000_phy_info *phy = &hw->phy;
2171 	s32 ret_val = E1000_SUCCESS;
2174 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2176 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* Deactivate: clear the non-D0 LPLU bit and restore SmartSpeed. */
2179 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2180 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2182 		if (phy->type != e1000_phy_igp_3)
2186 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2187 		 * during Dx states where the power conservation is most
2188 		 * important.  During driver activity we should enable
2189 		 * SmartSpeed, so performance is maintained.
2191 		if (phy->smart_speed == e1000_smart_speed_on) {
2192 			ret_val = phy->ops.read_reg(hw,
2193 						    IGP01E1000_PHY_PORT_CONFIG,
2198 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2199 			ret_val = phy->ops.write_reg(hw,
2200 						     IGP01E1000_PHY_PORT_CONFIG,
2204 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2205 			ret_val = phy->ops.read_reg(hw,
2206 						    IGP01E1000_PHY_PORT_CONFIG,
2211 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2212 			ret_val = phy->ops.write_reg(hw,
2213 						     IGP01E1000_PHY_PORT_CONFIG,
/* Activate only when the advertisement covers all duplexes at the
 * required speed classes, per the function header above.
 */
2218 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2219 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2220 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2221 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2222 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2224 		if (phy->type != e1000_phy_igp_3)
2228 		 * Call gig speed drop workaround on LPLU before accessing
2231 		if (hw->mac.type == e1000_ich8lan)
2232 			e1000_gig_downshift_workaround_ich8lan(hw);
2234 		/* When LPLU is enabled, we should disable SmartSpeed */
2235 		ret_val = phy->ops.read_reg(hw,
2236 					    IGP01E1000_PHY_PORT_CONFIG,
2241 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2242 		ret_val = phy->ops.write_reg(hw,
2243 					     IGP01E1000_PHY_PORT_CONFIG,
2252 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2253 * @hw: pointer to the HW structure
2254 * @bank: pointer to the variable that returns the active bank
2256 * Reads signature byte from the NVM using the flash access registers.
2257 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2259 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2262 	struct e1000_nvm_info *nvm = &hw->nvm;
/* Bank 1 starts one flash-bank-size (in bytes) past bank 0; the
 * signature byte is the high byte of the signature word.
 */
2263 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2264 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2266 	s32 ret_val = E1000_SUCCESS;
2268 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2270 	switch (hw->mac.type) {
/* Prefer the EECD SEC1VAL indication when the hardware marks it valid. */
2273 		eecd = E1000_READ_REG(hw, E1000_EECD);
2274 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2275 		    E1000_EECD_SEC1VAL_VALID_MASK) {
2276 			if (eecd & E1000_EECD_SEC1VAL)
2283 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2286 		/* set bank to 0 in case flash read fails */
/* Fall back to reading each bank's signature byte directly. */
2290 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2294 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2295 		    E1000_ICH_NVM_SIG_VALUE) {
2301 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2306 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2307 		    E1000_ICH_NVM_SIG_VALUE) {
2312 		DEBUGOUT("ERROR: No valid NVM bank present\n");
2313 		ret_val = -E1000_ERR_NVM;
2321 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2322 * @hw: pointer to the HW structure
2323 * @offset: The offset (in bytes) of the word(s) to read.
2324 * @words: Size of data to read in words
2325 * @data: Pointer to the word(s) to read at offset.
2327 * Reads a word(s) from the NVM using the flash access registers.
2329 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2332 	struct e1000_nvm_info *nvm = &hw->nvm;
2333 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2335 	s32 ret_val = E1000_SUCCESS;
2339 	DEBUGFUNC("e1000_read_nvm_ich8lan");
/* Reject reads outside the NVM word space. */
2341 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2343 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2344 		ret_val = -E1000_ERR_NVM;
2348 	nvm->ops.acquire(hw);
2350 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2351 	if (ret_val != E1000_SUCCESS) {
2352 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* Bank 1 reads are offset by one flash-bank-size. */
2356 	act_offset = (bank) ? nvm->flash_bank_size : 0;
2357 	act_offset += offset;
2359 	ret_val = E1000_SUCCESS;
/* Serve modified words from the shadow RAM; read the rest from flash. */
2360 	for (i = 0; i < words; i++) {
2361 		if (dev_spec->shadow_ram[offset+i].modified) {
2362 			data[i] = dev_spec->shadow_ram[offset+i].value;
2364 			ret_val = e1000_read_flash_word_ich8lan(hw,
2373 	nvm->ops.release(hw);
2377 		DEBUGOUT1("NVM read error: %d\n", ret_val);
2383 * e1000_flash_cycle_init_ich8lan - Initialize flash
2384 * @hw: pointer to the HW structure
2386 * This function does initial flash setup so that a new read/write/erase cycle
2389 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2391 	union ich8_hws_flash_status hsfsts;
2392 	s32 ret_val = -E1000_ERR_NVM;
2394 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2396 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2398 	/* Check if the flash descriptor is valid */
2399 	if (hsfsts.hsf_status.fldesvalid == 0) {
2400 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
2404 	/* Clear FCERR and DAEL in hw status by writing 1 */
2405 	hsfsts.hsf_status.flcerr = 1;
2406 	hsfsts.hsf_status.dael = 1;
2408 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2411 	 * Either we should have a hardware SPI cycle in progress
2412 	 * bit to check against, in order to start a new cycle or
2413 	 * FDONE bit should be changed in the hardware so that it
2414 	 * is 1 after hardware reset, which can then be used as an
2415 	 * indication whether a cycle is in progress or has been
/* No cycle in flight: claim the controller by setting FDONE. */
2419 	if (hsfsts.hsf_status.flcinprog == 0) {
2421 		 * There is no cycle running at present,
2422 		 * so we can start a cycle.
2423 		 * Begin by setting Flash Cycle Done.
2425 		hsfsts.hsf_status.flcdone = 1;
2426 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2427 		ret_val = E1000_SUCCESS;
2432 		 * Otherwise poll for sometime so the current
2433 		 * cycle has a chance to end before giving up.
2435 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2436 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2438 			if (hsfsts.hsf_status.flcinprog == 0) {
2439 				ret_val = E1000_SUCCESS;
2444 		if (ret_val == E1000_SUCCESS) {
2446 			 * Successful in waiting for previous cycle to timeout,
2447 			 * now set the Flash Cycle Done.
2449 			hsfsts.hsf_status.flcdone = 1;
2450 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
2453 			DEBUGOUT("Flash controller busy, cannot get access\n");
2462 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2463 * @hw: pointer to the HW structure
2464 * @timeout: maximum time to wait for completion
2466 * This function starts a flash cycle and waits for its completion.
2468 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2470 	union ich8_hws_flash_ctrl hsflctl;
2471 	union ich8_hws_flash_status hsfsts;
2472 	s32 ret_val = -E1000_ERR_NVM;
2475 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
2477 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2478 	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2479 	hsflctl.hsf_ctrl.flcgo = 1;
2480 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2482 	/* wait till FDONE bit is set to 1 */
2484 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2485 		if (hsfsts.hsf_status.flcdone == 1)
2488 	} while (i++ < timeout);
/* Success only when the cycle completed AND reported no error. */
2490 	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2491 		ret_val = E1000_SUCCESS;
2497 * e1000_read_flash_word_ich8lan - Read word from flash
2498 * @hw: pointer to the HW structure
2499 * @offset: offset to data location
2500 * @data: pointer to the location for storing the data
2502 * Reads the flash word at offset into data. Offset is converted
2503 * to bytes before read.
2505 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2510 DEBUGFUNC("e1000_read_flash_word_ich8lan");
/* NOTE(review): the guard condition for this error path is elided in
 * this excerpt — presumably a NULL-data / offset-range check; confirm. */
2513 ret_val = -E1000_ERR_NVM;
2517 /* Must convert offset into bytes. */
/* size = 2: delegate to the byte/word flash reader for a full word. */
2520 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2527 * e1000_read_flash_byte_ich8lan - Read byte from flash
2528 * @hw: pointer to the HW structure
2529 * @offset: The offset of the byte to read.
2530 * @data: Pointer to a byte to store the value read.
2532 * Reads a single byte from the NVM using the flash access registers.
2534 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2537 s32 ret_val = E1000_SUCCESS;
/* size = 1: read a single byte into the local word; the copy of the low
 * byte of 'word' into *data is elided in this excerpt. */
2540 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2551 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
2552 * @hw: pointer to the HW structure
2553 * @offset: The offset (in bytes) of the byte or word to read.
2554 * @size: Size of data to read, 1=byte 2=word
2555 * @data: Pointer to the word to store the value read.
2557 * Reads a byte or word from the NVM using the flash access registers.
2559 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2562 union ich8_hws_flash_status hsfsts;
2563 union ich8_hws_flash_ctrl hsflctl;
2564 u32 flash_linear_addr;
2566 s32 ret_val = -E1000_ERR_NVM;
2569 DEBUGFUNC("e1000_read_flash_data_ich8lan");
/* Only 1- or 2-byte accesses within the linear address window are legal. */
2571 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2574 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2575 hw->nvm.flash_base_addr;
/* Retry loop: each pass re-arms the cycle hardware before issuing a read. */
2580 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2581 if (ret_val != E1000_SUCCESS)
2584 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2585 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2586 hsflctl.hsf_ctrl.fldbcount = size - 1;
2587 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2588 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2590 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2592 ret_val = e1000_flash_cycle_ich8lan(hw,
2593 ICH_FLASH_READ_COMMAND_TIMEOUT);
2596 * Check if FCERR is set to 1, if set to 1, clear it
2597 * and try the whole sequence a few more times, else
2598 * read in (shift in) the Flash Data0, the order is
2599 * least significant byte first msb to lsb
2601 if (ret_val == E1000_SUCCESS) {
2602 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
/* Mask FDATA0 down to the requested access width. */
2604 *data = (u8)(flash_data & 0x000000FF);
2606 *data = (u16)(flash_data & 0x0000FFFF);
2610 * If we've gotten here, then things are probably
2611 * completely hosed, but if the error condition is
2612 * detected, it won't hurt to give it another try...
2613 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2615 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2617 if (hsfsts.hsf_status.flcerr == 1) {
2618 /* Repeat for some time before giving up. */
2620 } else if (hsfsts.hsf_status.flcdone == 0) {
/* Neither error nor done: the cycle stalled — abort rather than retry. */
2621 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
2625 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2632 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2633 * @hw: pointer to the HW structure
2634 * @offset: The offset (in bytes) of the word(s) to write.
2635 * @words: Size of data to write in words
2636 * @data: Pointer to the word(s) to write at offset.
2638 * Writes a byte or word to the NVM using the flash access registers.
2640 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2643 struct e1000_nvm_info *nvm = &hw->nvm;
2644 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2645 s32 ret_val = E1000_SUCCESS;
2648 DEBUGFUNC("e1000_write_nvm_ich8lan");
2650 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2652 DEBUGOUT("nvm parameter(s) out of bounds\n");
2653 ret_val = -E1000_ERR_NVM;
2657 nvm->ops.acquire(hw);
/* Writes are only staged in the shadow RAM here; the actual flash commit
 * happens later in e1000_update_nvm_checksum_ich8lan(). */
2659 for (i = 0; i < words; i++) {
2660 dev_spec->shadow_ram[offset+i].modified = TRUE;
2661 dev_spec->shadow_ram[offset+i].value = data[i];
2664 nvm->ops.release(hw);
2671 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2672 * @hw: pointer to the HW structure
2674 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2675 * which writes the checksum to the shadow ram. The changes in the shadow
2676 * ram are then committed to the EEPROM by processing each bank at a time
2677 * checking for the modified bit and writing only the pending changes.
2678 * After a successful commit, the shadow ram is cleared and is ready for
2681 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2683 struct e1000_nvm_info *nvm = &hw->nvm;
2684 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2685 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2689 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
2691 ret_val = e1000_update_nvm_checksum_generic(hw);
/* Only software-managed flash NVM needs the bank-commit dance below. */
2695 if (nvm->type != e1000_nvm_flash_sw)
2698 nvm->ops.acquire(hw);
2701 * We're writing to the opposite bank so if we're on bank 1,
2702 * write to bank 0 etc. We also need to erase the segment that
2703 * is going to be written
2705 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2706 if (ret_val != E1000_SUCCESS) {
2707 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* bank == 0: valid data lives in bank 0, so commit into bank 1. */
2712 new_bank_offset = nvm->flash_bank_size;
2713 old_bank_offset = 0;
2714 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
/* else: valid data in bank 1, commit into bank 0. */
2718 old_bank_offset = nvm->flash_bank_size;
2719 new_bank_offset = 0;
2720 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2725 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2727 * Determine whether to write the value stored
2728 * in the other NVM bank or a modified value stored
2731 if (dev_spec->shadow_ram[i].modified) {
2732 data = dev_spec->shadow_ram[i].value;
2734 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2742 * If the word is 0x13, then make sure the signature bits
2743 * (15:14) are 11b until the commit has completed.
2744 * This will allow us to write 10b which indicates the
2745 * signature is valid. We want to do this after the write
2746 * has completed so that we don't mark the segment valid
2747 * while the write is still in progress
2749 if (i == E1000_ICH_NVM_SIG_WORD)
2750 data |= E1000_ICH_NVM_SIG_MASK;
2752 /* Convert offset to bytes. */
2753 act_offset = (i + new_bank_offset) << 1;
2756 /* Write the bytes to the new bank. */
2757 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
/* Second call writes the high byte of the word (act_offset + 1). */
2764 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2772 * Don't bother writing the segment valid bits if sector
2773 * programming failed.
2776 DEBUGOUT("Flash commit failed.\n");
2781 * Finally validate the new segment by setting bit 15:14
2782 * to 10b in word 0x13 , this can be done without an
2783 * erase as well since these bits are 11 to start with
2784 * and we need to change bit 14 to 0b
2786 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2787 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2792 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2799 * And invalidate the previously valid segment by setting
2800 * its signature word (0x13) high_byte to 0b. This can be
2801 * done without an erase because flash erase sets all bits
2802 * to 1's. We can write 1's to 0's without an erase
2804 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2805 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2809 /* Great! Everything worked, we can now clear the cached entries. */
2810 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2811 dev_spec->shadow_ram[i].modified = FALSE;
2812 dev_spec->shadow_ram[i].value = 0xFFFF;
2816 nvm->ops.release(hw);
2819 * Reload the EEPROM, or else modifications will not appear
2820 * until after the next adapter reset.
2823 nvm->ops.reload(hw);
2829 DEBUGOUT1("NVM update error: %d\n", ret_val);
2835 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2836 * @hw: pointer to the HW structure
2838 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2839 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
2840 * calculated, in which case we need to calculate the checksum and set bit 6.
2842 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2844 s32 ret_val = E1000_SUCCESS;
2847 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2850 * Read 0x19 and check bit 6. If this bit is 0, the checksum
2851 * needs to be fixed. This bit is an indication that the NVM
2852 * was prepared by OEM software and did not calculate the
2853 * checksum...a likely scenario.
2855 ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
2859 if ((data & 0x40) == 0) {
/* NOTE(review): the line setting bit 6 in 'data' before the write-back
 * is elided in this excerpt — confirm data |= 0x40 precedes the write. */
2861 ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2864 ret_val = hw->nvm.ops.update(hw);
/* With the indicator fixed (or already set), run the generic validation. */
2869 ret_val = e1000_validate_nvm_checksum_generic(hw);
2876 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2877 * @hw: pointer to the HW structure
2878 * @offset: The offset (in bytes) of the byte/word to read.
2879 * @size: Size of data to read, 1=byte 2=word
2880 * @data: The byte(s) to write to the NVM.
2882 * Writes one/two bytes to the NVM using the flash access registers.
2884 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2887 union ich8_hws_flash_status hsfsts;
2888 union ich8_hws_flash_ctrl hsflctl;
2889 u32 flash_linear_addr;
2891 s32 ret_val = -E1000_ERR_NVM;
2894 DEBUGFUNC("e1000_write_ich8_data");
/* Reject sizes other than 1/2, values too wide for the chosen size
 * (data > size * 0xff), and offsets outside the linear window. */
2896 if (size < 1 || size > 2 || data > size * 0xff ||
2897 offset > ICH_FLASH_LINEAR_ADDR_MASK)
2900 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2901 hw->nvm.flash_base_addr;
2906 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2907 if (ret_val != E1000_SUCCESS)
2910 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2911 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2912 hsflctl.hsf_ctrl.fldbcount = size - 1;
2913 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2914 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2916 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
/* Stage the payload in FDATA0 before kicking off the write cycle. */
2919 flash_data = (u32)data & 0x00FF;
2921 flash_data = (u32)data;
2923 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2926 * check if FCERR is set to 1 , if set to 1, clear it
2927 * and try the whole sequence a few more times else done
2929 ret_val = e1000_flash_cycle_ich8lan(hw,
2930 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2931 if (ret_val == E1000_SUCCESS)
2935 * If we're here, then things are most likely
2936 * completely hosed, but if the error condition
2937 * is detected, it won't hurt to give it another
2938 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2940 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2941 if (hsfsts.hsf_status.flcerr == 1)
2942 /* Repeat for some time before giving up. */
2944 if (hsfsts.hsf_status.flcdone == 0) {
2945 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
2948 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2955 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2956 * @hw: pointer to the HW structure
2957 * @offset: The index of the byte to read.
2958 * @data: The byte to write to the NVM.
2960 * Writes a single byte to the NVM using the flash access registers.
2962 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
/* Widen the byte to the u16 the underlying data writer expects. */
2965 u16 word = (u16)data;
2967 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
2969 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2973 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2974 * @hw: pointer to the HW structure
2975 * @offset: The offset of the byte to write.
2976 * @byte: The byte to write to the NVM.
2978 * Writes a single byte to the NVM using the flash access registers.
2979 * Goes through a retry algorithm before giving up.
2981 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2982 u32 offset, u8 byte)
2985 u16 program_retries;
2987 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
/* Fast path: first attempt succeeds, no retries needed. */
2989 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2990 if (ret_val == E1000_SUCCESS)
/* Retry up to 100 times; loop exits early on the first success. */
2993 for (program_retries = 0; program_retries < 100; program_retries++) {
2994 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
2996 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2997 if (ret_val == E1000_SUCCESS)
/* All 100 retries exhausted: report a hard NVM error. */
3000 if (program_retries == 100) {
3001 ret_val = -E1000_ERR_NVM;
3010 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3011 * @hw: pointer to the HW structure
3012 * @bank: 0 for first bank, 1 for second bank, etc.
3014 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3015 * bank N is 4096 * N + flash_reg_addr.
3017 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3019 struct e1000_nvm_info *nvm = &hw->nvm;
3020 union ich8_hws_flash_status hsfsts;
3021 union ich8_hws_flash_ctrl hsflctl;
3022 u32 flash_linear_addr;
3023 /* bank size is in 16bit words - adjust to bytes */
3024 u32 flash_bank_size = nvm->flash_bank_size * 2;
3025 s32 ret_val = E1000_SUCCESS;
3027 s32 j, iteration, sector_size;
3029 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3031 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3034 * Determine HW Sector size: Read BERASE bits of hw flash status
3036 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3037 * consecutive sectors. The start index for the nth Hw sector
3038 * can be calculated as = bank * 4096 + n * 256
3039 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3040 * The start index for the nth Hw sector can be calculated
3042 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3043 * (ich9 only, otherwise error condition)
3044 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3046 switch (hsfsts.hsf_status.berasesz) {
3048 /* Hw sector size 256 */
3049 sector_size = ICH_FLASH_SEG_SIZE_256;
3050 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3053 sector_size = ICH_FLASH_SEG_SIZE_4K;
3057 sector_size = ICH_FLASH_SEG_SIZE_8K;
3061 sector_size = ICH_FLASH_SEG_SIZE_64K;
/* Unknown BERASE encoding: fail rather than erase with a bad geometry. */
3065 ret_val = -E1000_ERR_NVM;
3069 /* Start with the base address, then add the sector offset. */
3070 flash_linear_addr = hw->nvm.flash_base_addr;
3071 flash_linear_addr += (bank) ? flash_bank_size : 0;
3073 for (j = 0; j < iteration ; j++) {
3076 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3081 * Write a value 11 (block Erase) in Flash
3082 * Cycle field in hw flash control
3084 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3086 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3087 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3091 * Write the last 24 bits of an index within the
3092 * block into Flash Linear address field in Flash
/* NOTE(review): this '+=' sits inside both the per-sector for-loop and
 * the do/while retry loop, so (j * sector_size) accumulates across
 * iterations and retries instead of computing base + j*sector_size
 * fresh each time — verify against the intended sector addressing. */
3095 flash_linear_addr += (j * sector_size);
3096 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3099 ret_val = e1000_flash_cycle_ich8lan(hw,
3100 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3101 if (ret_val == E1000_SUCCESS)
3105 * Check if FCERR is set to 1. If 1,
3106 * clear it and try the whole sequence
3107 * a few more times else Done
3109 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3111 if (hsfsts.hsf_status.flcerr == 1)
3112 /* repeat for some time before giving up */
3114 else if (hsfsts.hsf_status.flcdone == 0)
3116 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3124 * e1000_valid_led_default_ich8lan - Set the default LED settings
3125 * @hw: pointer to the HW structure
3126 * @data: Pointer to the LED settings
3128 * Reads the LED default settings from the NVM to data. If the NVM LED
3129 * settings is all 0's or F's, set the LED default to a valid LED default
3132 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3136 DEBUGFUNC("e1000_valid_led_default_ich8lan");
3138 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3140 DEBUGOUT("NVM Read Error\n");
/* All-zero / all-one words are reserved sentinels: substitute the
 * ICH8LAN default LED configuration. */
3144 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3145 *data = ID_LED_DEFAULT_ICH8LAN;
3152 * e1000_id_led_init_pchlan - store LED configurations
3153 * @hw: pointer to the HW structure
3155 * PCH does not control LEDs via the LEDCTL register, rather it uses
3156 * the PHY LED configuration register.
3158 * PCH also does not have an "always on" or "always off" mode which
3159 * complicates the ID feature. Instead of using the "on" mode to indicate
3160 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3161 * use "link_up" mode. The LEDs will still ID on request if there is no
3162 * link based on logic in e1000_led_[on|off]_pchlan().
3164 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3166 struct e1000_mac_info *mac = &hw->mac;
/* "off" = link_up mode with the LED0 invert bit set. */
3168 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3169 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3170 u16 data, i, temp, shift;
3172 DEBUGFUNC("e1000_id_led_init_pchlan");
3174 /* Get default ID LED modes */
3175 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3179 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3180 mac->ledctl_mode1 = mac->ledctl_default;
3181 mac->ledctl_mode2 = mac->ledctl_default;
/* Each LED uses a 4-bit nibble of the NVM word; decode all four. */
3183 for (i = 0; i < 4; i++) {
3184 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
/* mode1: honor the "1" half of each ID_LED_* pairing. */
3187 case ID_LED_ON1_DEF2:
3188 case ID_LED_ON1_ON2:
3189 case ID_LED_ON1_OFF2:
3190 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3191 mac->ledctl_mode1 |= (ledctl_on << shift);
3193 case ID_LED_OFF1_DEF2:
3194 case ID_LED_OFF1_ON2:
3195 case ID_LED_OFF1_OFF2:
3196 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3197 mac->ledctl_mode1 |= (ledctl_off << shift);
/* mode2: honor the "2" half of each ID_LED_* pairing. */
3204 case ID_LED_DEF1_ON2:
3205 case ID_LED_ON1_ON2:
3206 case ID_LED_OFF1_ON2:
3207 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3208 mac->ledctl_mode2 |= (ledctl_on << shift);
3210 case ID_LED_DEF1_OFF2:
3211 case ID_LED_ON1_OFF2:
3212 case ID_LED_OFF1_OFF2:
3213 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3214 mac->ledctl_mode2 |= (ledctl_off << shift);
3227 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3228 * @hw: pointer to the HW structure
3230 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
3231 * register, so the bus width is hard coded.
3233 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3235 struct e1000_bus_info *bus = &hw->bus;
3238 DEBUGFUNC("e1000_get_bus_info_ich8lan");
3240 ret_val = e1000_get_bus_info_pcie_generic(hw);
3243 * ICH devices are "PCI Express"-ish. They have
3244 * a configuration space, but do not contain
3245 * PCI Express Capability registers, so bus width
3246 * must be hardcoded.
/* Fall back to x1 only when the generic probe could not determine it. */
3248 if (bus->width == e1000_bus_width_unknown)
3249 bus->width = e1000_bus_width_pcie_x1;
3255 * e1000_reset_hw_ich8lan - Reset the hardware
3256 * @hw: pointer to the HW structure
3258 * Does a full reset of the hardware which includes a reset of the PHY and
3261 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3263 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3268 DEBUGFUNC("e1000_reset_hw_ich8lan");
3271 * Prevent the PCI-E bus from sticking if there is no TLP connection
3272 * on the last TLP read/write transaction when MAC is reset.
3274 ret_val = e1000_disable_pcie_master_generic(hw);
3276 DEBUGOUT("PCI-E Master disable polling has failed.\n");
3278 DEBUGOUT("Masking off all interrupts\n");
3279 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3282 * Disable the Transmit and Receive units. Then delay to allow
3283 * any pending transactions to complete before we hit the MAC
3284 * with the global reset.
3286 E1000_WRITE_REG(hw, E1000_RCTL, 0);
3287 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3288 E1000_WRITE_FLUSH(hw);
3292 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3293 if (hw->mac.type == e1000_ich8lan) {
3294 /* Set Tx and Rx buffer allocation to 8k apiece. */
3295 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3296 /* Set Packet Buffer Size to 16k. */
3297 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3300 if (hw->mac.type == e1000_pchlan) {
3301 /* Save the NVM K1 bit setting*/
/* FIX: "&reg" had been corrupted into the HTML entity "(R)" glyph;
 * restore the address-of argument so the NVM word lands in 'reg'. */
3302 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
3306 if (reg & E1000_NVM_K1_ENABLE)
3307 dev_spec->nvm_k1_enabled = TRUE;
3309 dev_spec->nvm_k1_enabled = FALSE;
3312 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3314 if (!hw->phy.ops.check_reset_block(hw)) {
3316 * Full-chip reset requires MAC and PHY reset at the same
3317 * time to make sure the interface between MAC and the
3318 * external PHY is reset.
3320 ctrl |= E1000_CTRL_PHY_RST;
3323 * Gate automatic PHY configuration by hardware on
3326 if ((hw->mac.type == e1000_pch2lan) &&
3327 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3328 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3330 ret_val = e1000_acquire_swflag_ich8lan(hw);
3331 DEBUGOUT("Issuing a global reset to ich8lan\n");
3332 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3333 /* cannot issue a flush here because it hangs the hardware */
3337 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
/* Only wait for PHY config / run post-reset work if we actually
 * asserted PHY_RST above. */
3339 if (ctrl & E1000_CTRL_PHY_RST) {
3340 ret_val = hw->phy.ops.get_cfg_done(hw);
3344 ret_val = e1000_post_phy_reset_ich8lan(hw);
3350 * For PCH, this write will make sure that any noise
3351 * will be detected as a CRC error and be dropped rather than show up
3352 * as a bad packet to the DMA engine.
3354 if (hw->mac.type == e1000_pchlan)
3355 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
/* Re-mask and clear any interrupts raised by the reset itself. */
3357 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3358 E1000_READ_REG(hw, E1000_ICR);
3360 kab = E1000_READ_REG(hw, E1000_KABGTXD);
3361 kab |= E1000_KABGTXD_BGSQLBIAS;
3362 E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
3369 * e1000_init_hw_ich8lan - Initialize the hardware
3370 * @hw: pointer to the HW structure
3372 * Prepares the hardware for transmit and receive by doing the following:
3373 * - initialize hardware bits
3374 * - initialize LED identification
3375 * - setup receive address registers
3376 * - setup flow control
3377 * - setup transmit descriptors
3378 * - clear statistics
3380 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3382 struct e1000_mac_info *mac = &hw->mac;
3383 u32 ctrl_ext, txdctl, snoop;
3387 DEBUGFUNC("e1000_init_hw_ich8lan");
3389 e1000_initialize_hw_bits_ich8lan(hw);
3391 /* Initialize identification LED */
3392 ret_val = mac->ops.id_led_init(hw);
3394 DEBUGOUT("Error initializing identification LED\n");
3395 /* This is not fatal and we should not stop init due to this */
3397 /* Setup the receive address. */
3398 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3400 /* Zero out the Multicast HASH table */
3401 DEBUGOUT("Zeroing the MTA\n");
3402 for (i = 0; i < mac->mta_reg_count; i++)
3403 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3406 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3407 * the ME. Disable wakeup by clearing the host wakeup bit.
3408 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3410 if (hw->phy.type == e1000_phy_82578) {
3411 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
3412 i &= ~BM_WUC_HOST_WU_BIT;
3413 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
3414 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3419 /* Setup link and flow control */
3420 ret_val = mac->ops.setup_link(hw);
3422 /* Set the transmit descriptor write-back policy for both queues */
/* Queue 0: full descriptor write-back, max prefetch threshold. */
3423 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3424 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3425 E1000_TXDCTL_FULL_TX_DESC_WB;
3426 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3427 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3428 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
/* Queue 1: same policy as queue 0. */
3429 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3430 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3431 E1000_TXDCTL_FULL_TX_DESC_WB;
3432 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3433 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3434 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3437 * ICH8 has opposite polarity of no_snoop bits.
3438 * By default, we should use snoop behavior.
3440 if (mac->type == e1000_ich8lan)
3441 snoop = PCIE_ICH8_SNOOP_ALL;
3443 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3444 e1000_set_pcie_no_snoop_generic(hw, snoop);
3446 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3447 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3448 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3451 * Clear all of the statistics registers (clear on read). It is
3452 * important that we do this after we have tried to establish link
3453 * because the symbol error count will increment wildly if there
3456 e1000_clear_hw_cntrs_ich8lan(hw);
3461 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3462 * @hw: pointer to the HW structure
3464 * Sets/Clears required hardware bits necessary for correctly setting up the
3465 * hardware for transmit and receive.
3467 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3471 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3473 /* Extended Device Control */
3474 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3476 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3477 if (hw->mac.type >= e1000_pchlan)
3478 reg |= E1000_CTRL_EXT_PHYPDEN;
3479 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3481 /* Transmit Descriptor Control 0 */
3482 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3484 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3486 /* Transmit Descriptor Control 1 */
3487 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3489 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3491 /* Transmit Arbitration Control 0 */
3492 reg = E1000_READ_REG(hw, E1000_TARC(0));
/* NOTE(review): raw bit positions below are undocumented magic from the
 * hardware spec update; do not fold into named constants without it. */
3493 if (hw->mac.type == e1000_ich8lan)
3494 reg |= (1 << 28) | (1 << 29);
3495 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3496 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3498 /* Transmit Arbitration Control 1 */
3499 reg = E1000_READ_REG(hw, E1000_TARC(1));
3500 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3504 reg |= (1 << 24) | (1 << 26) | (1 << 30);
3505 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3508 if (hw->mac.type == e1000_ich8lan) {
3509 reg = E1000_READ_REG(hw, E1000_STATUS);
3511 E1000_WRITE_REG(hw, E1000_STATUS, reg);
3515 * work-around descriptor data corruption issue during nfs v2 udp
3516 * traffic, just disable the nfs filtering capability
3518 reg = E1000_READ_REG(hw, E1000_RFCTL);
3519 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3520 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3526 * e1000_setup_link_ich8lan - Setup flow control and link settings
3527 * @hw: pointer to the HW structure
3529 * Determines which flow control settings to use, then configures flow
3530 * control. Calls the appropriate media-specific link configuration
3531 * function. Assuming the adapter has a valid link partner, a valid link
3532 * should be established. Assumes the hardware has previously been reset
3533 * and the transmitter and receiver are not enabled.
3535 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3537 s32 ret_val = E1000_SUCCESS;
3539 DEBUGFUNC("e1000_setup_link_ich8lan");
/* Manageability firmware owns the PHY: skip link setup entirely. */
3541 if (hw->phy.ops.check_reset_block(hw))
3545 * ICH parts do not have a word in the NVM to determine
3546 * the default flow control setting, so we explicitly
3549 if (hw->fc.requested_mode == e1000_fc_default)
3550 hw->fc.requested_mode = e1000_fc_full;
3553 * Save off the requested flow control mode for use later. Depending
3554 * on the link partner's capabilities, we may or may not use this mode.
3556 hw->fc.current_mode = hw->fc.requested_mode;
3558 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3559 hw->fc.current_mode);
3561 /* Continue to configure the copper link. */
3562 ret_val = hw->mac.ops.setup_physical_interface(hw);
3566 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
/* PCH-class PHYs additionally need the refresh timer and a PHY-side
 * copy of the pause timeout written. */
3567 if ((hw->phy.type == e1000_phy_82578) ||
3568 (hw->phy.type == e1000_phy_82579) ||
3569 (hw->phy.type == e1000_phy_82577)) {
3570 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3572 ret_val = hw->phy.ops.write_reg(hw,
3573 PHY_REG(BM_PORT_CTRL_PAGE, 27),
3579 ret_val = e1000_set_fc_watermarks_generic(hw);
3586 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3587 * @hw: pointer to the HW structure
3589 * Configures the kumeran interface to the PHY to wait the appropriate time
3590 * when polling the PHY, then call the generic setup_copper_link to finish
3591 * configuring the copper link.
3593 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3599 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
/* Force link-up in the MAC but leave speed/duplex to autonegotiation. */
3601 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3602 ctrl |= E1000_CTRL_SLU;
3603 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3604 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3607 * Set the mac to wait the maximum time between each iteration
3608 * and increase the max iterations when polling the phy;
3609 * this fixes erroneous timeouts at 10Mbps.
3611 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
3615 ret_val = e1000_read_kmrn_reg_generic(hw,
3616 E1000_KMRNCTRLSTA_INBAND_PARAM,
3621 ret_val = e1000_write_kmrn_reg_generic(hw,
3622 E1000_KMRNCTRLSTA_INBAND_PARAM,
/* PHY-family-specific copper setup before the generic link bring-up. */
3627 switch (hw->phy.type) {
3628 case e1000_phy_igp_3:
3629 ret_val = e1000_copper_link_setup_igp(hw);
3634 case e1000_phy_82578:
3635 ret_val = e1000_copper_link_setup_m88(hw);
3639 case e1000_phy_82577:
3640 case e1000_phy_82579:
3641 ret_val = e1000_copper_link_setup_82577(hw);
/* IFE-family branch: program MDI/MDI-X behavior by hand. */
3646 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3651 reg_data &= ~IFE_PMC_AUTO_MDIX;
3653 switch (hw->phy.mdix) {
3655 reg_data &= ~IFE_PMC_FORCE_MDIX;
3658 reg_data |= IFE_PMC_FORCE_MDIX;
3662 reg_data |= IFE_PMC_AUTO_MDIX;
3665 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3673 ret_val = e1000_setup_copper_link_generic(hw);
3680 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3681 * @hw: pointer to the HW structure
3682 * @speed: pointer to store current link speed
3683 * @duplex: pointer to store the current link duplex
3685 * Calls the generic get_speed_and_duplex to retrieve the current link
3686 * information and then calls the Kumeran lock loss workaround for links at
3689 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3694 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3696 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
/* The Kumeran lock-loss workaround only applies to ICH8 + IGP3 PHY
 * when linked at gigabit speed. */
3700 if ((hw->mac.type == e1000_ich8lan) &&
3701 (hw->phy.type == e1000_phy_igp_3) &&
3702 (*speed == SPEED_1000)) {
3703 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3711 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3712 * @hw: pointer to the HW structure
3714 * Work-around for 82566 Kumeran PCS lock loss:
3715 * On link status change (i.e. PCI reset, speed change) and link is up and
3717 * 0) if workaround is optionally disabled do nothing
3718 * 1) wait 1ms for Kumeran link to come up
3719 * 2) check Kumeran Diagnostic register PCS lock loss bit
3720 * 3) if not set the link is locked (all is good), otherwise...
3722 * 5) repeat up to 10 times
3723 * Note: this is only called for IGP3 copper when speed is 1gb.
3725 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3727 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3729 s32 ret_val = E1000_SUCCESS;
3733 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
/* Workaround can be disabled via
 * e1000_set_kmrn_lock_loss_workaround_ich8lan(). */
3735 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3739 * Make sure link is up before proceeding. If not just return.
3740 * Attempting this while link is negotiating fouled up link
3743 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3745 ret_val = E1000_SUCCESS;
3749 for (i = 0; i < 10; i++) {
3750 /* read once to clear */
3751 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3754 /* and again to get new status */
3755 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3759 /* check for PCS lock */
3760 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
3761 ret_val = E1000_SUCCESS;
3765 /* Issue PHY reset */
3766 hw->phy.ops.reset(hw);
/* All 10 attempts failed: disable gigabit to keep the link usable. */
3769 /* Disable GigE link negotiation */
3770 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3771 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3772 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3773 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3776 * Call gig speed drop workaround on Gig disable before accessing
3779 e1000_gig_downshift_workaround_ich8lan(hw);
3781 /* unable to acquire PCS lock */
3782 ret_val = -E1000_ERR_PHY;
3789 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3790 * @hw: pointer to the HW structure
3791 * @state: boolean value used to set the current Kumeran workaround state
3793 * If ICH8, set the current Kumeran workaround state (enabled - TRUE
3794 * /disabled - FALSE).
3796 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3799 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3801 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
/* The lock-loss erratum only applies to ICH8; refuse to change the flag
 * on any other MAC type. */
3803 if (hw->mac.type != e1000_ich8lan) {
3804 DEBUGOUT("Workaround applies to ICH8 only.\n");
/* Flag is consumed by e1000_kmrn_lock_loss_workaround_ich8lan(). */
3808 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3814 * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3815 * @hw: pointer to the HW structure
3817 * Workaround for 82566 power-down on D3 entry:
3818 * 1) disable gigabit link
3819 * 2) write VR power-down enable
3821 * Continue if successful, else issue LCD reset and repeat
3823 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3829 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
/* Only the IGP3 PHY is affected by this erratum. */
3831 if (hw->phy.type != e1000_phy_igp_3)
3834 /* Try the workaround twice (if needed) */
/* Disable gigabit negotiation (D0 and non-D0a) before power-down. */
3837 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3838 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3839 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3840 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3843 * Call gig speed drop workaround on Gig disable before
3844 * accessing any PHY registers
3846 if (hw->mac.type == e1000_ich8lan)
3847 e1000_gig_downshift_workaround_ich8lan(hw);
3849 /* Write VR power-down enable */
/* Read-modify-write: clear the power-down mode field, then set shutdown. */
3850 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3851 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3852 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3853 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3855 /* Read it back and test */
3856 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3857 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
/* Success, or this was already the second attempt: stop retrying. */
3858 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3861 /* Issue PHY reset and repeat at most one more time */
3862 reg = E1000_READ_REG(hw, E1000_CTRL);
3863 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3872 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3873 * @hw: pointer to the HW structure
3875 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
3876 * LPLU, Gig disable, MDIC PHY reset):
3877 * 1) Set Kumeran Near-end loopback
3878 * 2) Clear Kumeran Near-end loopback
3879 * Should only be called for ICH8[m] devices with any 1G Phy.
3881 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3883 s32 ret_val = E1000_SUCCESS;
3886 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
/* Applies to ICH8 only, and not to the IFE (10/100-only) PHY. */
3888 if ((hw->mac.type != e1000_ich8lan) ||
3889 (hw->phy.type == e1000_phy_ife))
/* Pulse Kumeran near-end loopback: set the diag bit, then clear it. */
3892 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3896 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3897 ret_val = e1000_write_kmrn_reg_generic(hw,
3898 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3902 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3903 ret_val = e1000_write_kmrn_reg_generic(hw,
3904 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3911 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
3912 * @hw: pointer to the HW structure
3914 * During S0 to Sx transition, it is possible the link remains at gig
3915 * instead of negotiating to a lower speed. Before going to Sx, set
3916 * 'Gig Disable' to force link speed negotiation to a lower speed based on
3917 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
3918 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
3919 * needs to be written.
3921 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3926 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
/* Force gigabit off before entering Sx. */
3928 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3929 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
3930 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3931 if (hw->mac.type == e1000_ich8lan)
3932 e1000_gig_downshift_workaround_ich8lan(hw);
/* PCH and newer: program OEM bits (FALSE = not restoring to active),
 * reset the PHY, then re-write the SMBus address under the PHY semaphore. */
3934 if (hw->mac.type >= e1000_pchlan) {
3935 e1000_oem_bits_config_ich8lan(hw, FALSE);
3936 e1000_phy_hw_reset_ich8lan(hw);
3937 ret_val = hw->phy.ops.acquire(hw);
3940 e1000_write_smbus_addr(hw);
3941 hw->phy.ops.release(hw);
3948 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
3949 * @hw: pointer to the HW structure
3951 * During Sx to S0 transitions on non-managed devices or managed devices
3952 * on which PHY resets are not blocked, if the PHY registers cannot be
3953 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
3956 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
3958 u16 phy_id1, phy_id2;
3961 DEBUGFUNC("e1000_resume_workarounds_pchlan");
/* Only PCH2 parts need this; skip when PHY resets are blocked (ME). */
3963 if ((hw->mac.type != e1000_pch2lan) ||
3964 hw->phy.ops.check_reset_block(hw))
3967 ret_val = hw->phy.ops.acquire(hw);
3969 DEBUGOUT("Failed to acquire PHY semaphore in resume\n");
3973 /* Test access to the PHY registers by reading the ID regs */
3974 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
3977 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
/* If the ID read back matches the stored PHY id, the PHY is accessible
 * and no power cycle is needed. */
3981 if (hw->phy.id == ((u32)(phy_id1 << 16) |
3982 (u32)(phy_id2 & PHY_REVISION_MASK)))
/* PHY not responding: power cycle it via LANPHYPC and reset it. */
3985 e1000_toggle_lanphypc_value_ich8lan(hw);
3987 hw->phy.ops.release(hw);
3989 hw->phy.ops.reset(hw);
3994 hw->phy.ops.release(hw);
4000 * e1000_cleanup_led_ich8lan - Restore the default LED operation
4001 * @hw: pointer to the HW structure
4003 * Return the LED back to the default configuration.
4005 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4007 DEBUGFUNC("e1000_cleanup_led_ich8lan");
/* IFE PHY controls its LEDs via a PHY register; others use LEDCTL. */
4009 if (hw->phy.type == e1000_phy_ife)
4010 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4013 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
4014 return E1000_SUCCESS;
4018 * e1000_led_on_ich8lan - Turn LEDs on
4019 * @hw: pointer to the HW structure
4023 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
4025 DEBUGFUNC("e1000_led_on_ich8lan");
/* IFE PHY: drive LEDs on through probe mode; others use LEDCTL mode2. */
4027 if (hw->phy.type == e1000_phy_ife)
4028 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4029 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
4031 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
4032 return E1000_SUCCESS;
4036 * e1000_led_off_ich8lan - Turn LEDs off
4037 * @hw: pointer to the HW structure
4039 * Turn off the LEDs.
4041 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
4043 DEBUGFUNC("e1000_led_off_ich8lan");
/* IFE PHY: drive LEDs off through probe mode; others use LEDCTL mode1. */
4045 if (hw->phy.type == e1000_phy_ife)
4046 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4047 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
4049 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
4050 return E1000_SUCCESS;
4054 * e1000_setup_led_pchlan - Configures SW controllable LED
4055 * @hw: pointer to the HW structure
4057 * This prepares the SW controllable LED for use.
4059 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4061 DEBUGFUNC("e1000_setup_led_pchlan");
/* On PCH parts LED config lives in a PHY register, not MAC LEDCTL. */
4063 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4064 (u16)hw->mac.ledctl_mode1);
4068 * e1000_cleanup_led_pchlan - Restore the default LED operation
4069 * @hw: pointer to the HW structure
4071 * Return the LED back to the default configuration.
4073 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4075 DEBUGFUNC("e1000_cleanup_led_pchlan");
/* Restore the power-on default LED configuration via the PHY register. */
4077 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4078 (u16)hw->mac.ledctl_default);
4082 * e1000_led_on_pchlan - Turn LEDs on
4083 * @hw: pointer to the HW structure
4087 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4089 u16 data = (u16)hw->mac.ledctl_mode2;
4092 DEBUGFUNC("e1000_led_on_pchlan");
4095 * If no link, then turn LED on by setting the invert bit
4096 * for each LED that's mode is "link_up" in ledctl_mode2.
4098 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Three LEDs, 5 config bits each, packed into one 16-bit register. */
4099 for (i = 0; i < 3; i++) {
4100 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4101 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4102 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit so a "link up" LED lights with no link. */
4104 if (led & E1000_PHY_LED0_IVRT)
4105 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4107 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4111 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4115 * e1000_led_off_pchlan - Turn LEDs off
4116 * @hw: pointer to the HW structure
4118 * Turn off the LEDs.
4120 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4122 u16 data = (u16)hw->mac.ledctl_mode1;
4125 DEBUGFUNC("e1000_led_off_pchlan");
4128 * If no link, then turn LED off by clearing the invert bit
4129 * for each LED that's mode is "link_up" in ledctl_mode1.
4131 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Three LEDs, 5 config bits each, packed into one 16-bit register. */
4132 for (i = 0; i < 3; i++) {
4133 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4134 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4135 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit so a "link up" LED goes dark with no link. */
4137 if (led & E1000_PHY_LED0_IVRT)
4138 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4140 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4144 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4148 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4149 * @hw: pointer to the HW structure
4151 * Read appropriate register for the config done bit for completion status
4152 * and configure the PHY through s/w for EEPROM-less parts.
4154 * NOTE: some silicon which is EEPROM-less will fail trying to read the
4155 * config done bit, so only an error is logged and continues. If we were
4156 * to return with error, EEPROM-less silicon would not be able to be reset
4159 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4161 s32 ret_val = E1000_SUCCESS;
4165 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4167 e1000_get_cfg_done_generic(hw);
4169 /* Wait for indication from h/w that it has completed basic config */
/* ICH10 and newer use the LAN-init-done flag; older parts poll the
 * auto-read-done bit. */
4170 if (hw->mac.type >= e1000_ich10lan) {
4171 e1000_lan_init_done_ich8lan(hw);
4173 ret_val = e1000_get_auto_rd_done_generic(hw);
4176 * When auto config read does not complete, do not
4177 * return with an error. This can happen in situations
4178 * where there is no eeprom and prevents getting link.
4180 DEBUGOUT("Auto Read Done did not complete\n");
4181 ret_val = E1000_SUCCESS;
4185 /* Clear PHY Reset Asserted bit */
4186 status = E1000_READ_REG(hw, E1000_STATUS);
4187 if (status & E1000_STATUS_PHYRA)
4188 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4190 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4192 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
4193 if (hw->mac.type <= e1000_ich9lan) {
/* EECD_PRES clear + IGP3 PHY means an EEPROM-less part: run the
 * software init script instead of relying on NVM autoload. */
4194 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
4195 (hw->phy.type == e1000_phy_igp_3)) {
4196 e1000_phy_init_script_igp3(hw);
/* Newer parts: a failed valid-NVM-bank detect implies no EEPROM. */
4199 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4200 /* Maybe we should do a basic PHY config */
4201 DEBUGOUT("EEPROM not present\n");
4202 ret_val = -E1000_ERR_CONFIG;
4210 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4211 * @hw: pointer to the HW structure
4213 * In the case of a PHY power down to save power, or to turn off link during a
4214 * driver unload, or wake on lan is not enabled, remove the link.
4216 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4218 /* If the management interface is not enabled, then power down */
/* Skip the power-down when manageability (ME) is active or PHY resets
 * are blocked, since the PHY must stay up for the management engine. */
4219 if (!(hw->mac.ops.check_mng_mode(hw) ||
4220 hw->phy.ops.check_reset_block(hw)))
4221 e1000_power_down_phy_copper(hw);
4227 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4228 * @hw: pointer to the HW structure
4230 * Clears hardware counters specific to the silicon family and calls
4231 * clear_hw_cntrs_generic to clear all general purpose counters.
4233 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4238 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4240 e1000_clear_hw_cntrs_base_generic(hw);
4242 E1000_READ_REG(hw, E1000_ALGNERRC);
4243 E1000_READ_REG(hw, E1000_RXERRC);
4244 E1000_READ_REG(hw, E1000_TNCRS);
4245 E1000_READ_REG(hw, E1000_CEXTERR);
4246 E1000_READ_REG(hw, E1000_TSCTC);
4247 E1000_READ_REG(hw, E1000_TSCTFC);
4249 E1000_READ_REG(hw, E1000_MGTPRC);
4250 E1000_READ_REG(hw, E1000_MGTPDC);
4251 E1000_READ_REG(hw, E1000_MGTPTC);
4253 E1000_READ_REG(hw, E1000_IAC);
4254 E1000_READ_REG(hw, E1000_ICRXOC);
4256 /* Clear PHY statistics registers */
4257 if ((hw->phy.type == e1000_phy_82578) ||
4258 (hw->phy.type == e1000_phy_82579) ||
4259 (hw->phy.type == e1000_phy_82577)) {
4260 ret_val = hw->phy.ops.acquire(hw);
4263 ret_val = hw->phy.ops.set_page(hw,
4264 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4267 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4268 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4269 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4270 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4271 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4272 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4273 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4274 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4275 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4276 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4277 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4278 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4279 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4280 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4282 hw->phy.ops.release(hw);