1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 * 82562G 10/100 Network Connection
37 * 82562G-2 10/100 Network Connection
38 * 82562GT 10/100 Network Connection
39 * 82562GT-2 10/100 Network Connection
40 * 82562V 10/100 Network Connection
41 * 82562V-2 10/100 Network Connection
42 * 82566DC-2 Gigabit Network Connection
43 * 82566DC Gigabit Network Connection
44 * 82566DM-2 Gigabit Network Connection
45 * 82566DM Gigabit Network Connection
46 * 82566MC Gigabit Network Connection
47 * 82566MM Gigabit Network Connection
48 * 82567LM Gigabit Network Connection
49 * 82567LF Gigabit Network Connection
50 * 82567V Gigabit Network Connection
51 * 82567LM-2 Gigabit Network Connection
52 * 82567LF-2 Gigabit Network Connection
53 * 82567V-2 Gigabit Network Connection
54 * 82567LF-3 Gigabit Network Connection
55 * 82567LM-3 Gigabit Network Connection
56 * 82567LM-4 Gigabit Network Connection
57 * 82577LM Gigabit Network Connection
58 * 82577LC Gigabit Network Connection
59 * 82578DM Gigabit Network Connection
60 * 82578DC Gigabit Network Connection
61 * 82579LM Gigabit Network Connection
62 * 82579V Gigabit Network Connection
65 #include "e1000_api.h"
67 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
68 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
69 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
70 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
71 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
72 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
73 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
74 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
75 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
76 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
77 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
78 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
81 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
82 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
83 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
84 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
86 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
88 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
89 u16 words, u16 *data);
90 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
91 u16 words, u16 *data);
92 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
93 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
94 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
96 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
97 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
98 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
99 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
100 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
101 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
102 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
103 u16 *speed, u16 *duplex);
104 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
105 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
106 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
107 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
108 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
109 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
110 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
111 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
112 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
113 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
114 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
115 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
116 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
117 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
118 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
119 u32 offset, u8 *data);
120 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
122 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
123 u32 offset, u16 *data);
124 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
125 u32 offset, u8 byte);
126 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
127 u32 offset, u8 data);
128 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
130 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
131 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
132 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
133 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
134 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
135 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
    u16 flcdone :1; /* bit 0 Flash Cycle Done */
    u16 flcerr :1; /* bit 1 Flash Cycle Error */
    u16 dael :1; /* bit 2 Direct Access error Log */
    u16 berasesz :2; /* bit 4:3 Sector Erase Size */
    u16 flcinprog :1; /* bit 5 flash cycle in Progress */
    u16 reserved1 :2; /* bit 7:6 Reserved */
    u16 reserved2 :6; /* bit 13:8 Reserved */
    u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
    u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
    struct ich8_hsflctl {
        u16 flcgo :1;   /* 0 Flash Cycle Go */
        u16 flcycle :2;   /* 2:1 Flash Cycle (read/write/erase encoding) */
        u16 reserved :5;   /* 7:3 Reserved */
        u16 fldbcount :2;   /* 9:8 Flash Data Byte Count (count - 1) */
        u16 flockdn :6;   /* 15:10 Reserved */
/* ICH Flash Region Access Permissions (FRACC register) */
union ich8_hws_flash_regacc {
    u32 grra :8; /* 0:7 GbE region Read Access */
    u32 grwa :8; /* 8:15 GbE region Write Access */
    u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
    u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
/**
 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers for
 * PCH/PCH2 (82577/82578/82579) parts.
 *
 * NOTE(review): this listing appears to have lost its blank, brace,
 * case-label and local-declaration lines (e.g. "u32 ctrl, fwsm;") during
 * extraction; only comments were changed here.
 */
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;

DEBUGFUNC("e1000_init_phy_params_pchlan");

/* Delay (in usec) applied after each PHY reset */
phy->reset_delay_us = 100;

/*
 * PCH PHY accessors: HV-family MDIO register access, ICH8-style
 * software-flag semaphore, and a single LPLU handler shared between
 * D0 and D3 states.
 */
phy->ops.acquire = e1000_acquire_swflag_ich8lan;
phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
phy->ops.read_reg = e1000_read_phy_reg_hv;
phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
phy->ops.release = e1000_release_swflag_ich8lan;
phy->ops.reset = e1000_phy_hw_reset_ich8lan;
phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
phy->ops.write_reg = e1000_write_phy_reg_hv;
phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
phy->ops.power_up = e1000_power_up_phy_copper;
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

/*
 * The MAC-PHY interconnect may still be in SMBus mode
 * after Sx->S0.  If the manageability engine (ME) is
 * disabled, then toggle the LANPHYPC Value bit to force
 * the interconnect to PCIe mode.
 */
fwsm = E1000_READ_REG(hw, E1000_FWSM);
if (!(fwsm & E1000_ICH_FWSM_FW_VALID) &&
    !(hw->phy.ops.check_reset_block(hw))) {
    ctrl = E1000_READ_REG(hw, E1000_CTRL);
    ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
    ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
    E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
    /* NOTE(review): a settle delay between these two writes appears to
     * have been dropped from this listing — confirm against upstream. */
    ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
    E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

    /*
     * Gate automatic PHY configuration by hardware on
     * non-managed 82579
     */
    if (hw->mac.type == e1000_pch2lan)
        e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

/*
 * Reset the PHY before any access to it.  Doing so, ensures that
 * the PHY is in a known good state before we read/write PHY registers.
 * The generic reset is sufficient here, because we haven't determined
 * the PHY type yet.
 */
ret_val = e1000_phy_hw_reset_generic(hw);

/* Ungate automatic PHY configuration on non-managed 82579 */
if ((hw->mac.type == e1000_pch2lan) &&
    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
    e1000_gate_hw_phy_config_ich8lan(hw, FALSE);

/* Identify the PHY; try normal MDIO speed first, then slow mode */
phy->id = e1000_phy_unknown;
switch (hw->mac.type) {
    ret_val = e1000_get_phy_id(hw);
    if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
    /*
     * In case the PHY needs to be in mdio slow mode,
     * set slow mode and try to get the PHY id again.
     */
    ret_val = e1000_set_mdio_slow_mode_hv(hw);
    ret_val = e1000_get_phy_id(hw);
phy->type = e1000_get_phy_type_from_id(phy->id);

/* PHY-type-specific ops (enclosing switch header lost in extraction) */
case e1000_phy_82577:
case e1000_phy_82579:
    phy->ops.check_polarity = e1000_check_polarity_82577;
    phy->ops.force_speed_duplex =
        e1000_phy_force_speed_duplex_82577;
    phy->ops.get_cable_length = e1000_get_cable_length_82577;
    phy->ops.get_info = e1000_get_phy_info_82577;
    phy->ops.commit = e1000_phy_sw_reset_generic;
case e1000_phy_82578:
    phy->ops.check_polarity = e1000_check_polarity_m88;
    phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
    phy->ops.get_cable_length = e1000_get_cable_length_m88;
    phy->ops.get_info = e1000_get_phy_info_m88;
    /* Unknown PHY type is a hard error */
    ret_val = -E1000_ERR_PHY;
/**
 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers for
 * ICH8/9/10 parts (IGP3, IFE and BM class PHYs).
 *
 * NOTE(review): blank/brace/case-label/local-declaration lines appear
 * dropped from this listing; only comments were changed here.
 */
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;

DEBUGFUNC("e1000_init_phy_params_ich8lan");

/* Delay (in usec) applied after each PHY reset */
phy->reset_delay_us = 100;

/* Default to IGP-style register access; may be switched to BM below */
phy->ops.acquire = e1000_acquire_swflag_ich8lan;
phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
phy->ops.read_reg = e1000_read_phy_reg_igp;
phy->ops.release = e1000_release_swflag_ich8lan;
phy->ops.reset = e1000_phy_hw_reset_ich8lan;
phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
phy->ops.write_reg = e1000_write_phy_reg_igp;
phy->ops.power_up = e1000_power_up_phy_copper;
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;

/*
 * We may need to do this twice - once for IGP and if that fails,
 * we'll set BM func pointers and try again
 */
ret_val = e1000_determine_phy_address(hw);
    phy->ops.write_reg = e1000_write_phy_reg_bm;
    phy->ops.read_reg = e1000_read_phy_reg_bm;
    ret_val = e1000_determine_phy_address(hw);
        DEBUGOUT("Cannot determine PHY addr. Erroring out\n");

/* Retry the ID read until a known PHY type is reported (bounded loop;
 * the loop counter line was lost in extraction). */
while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
    ret_val = e1000_get_phy_id(hw);

/* Per-PHY-ID setup (enclosing switch header lost in extraction) */
case IGP03E1000_E_PHY_ID:
    phy->type = e1000_phy_igp_3;
    phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
    phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
    phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
    phy->ops.get_info = e1000_get_phy_info_igp;
    phy->ops.check_polarity = e1000_check_polarity_igp;
    phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
case IFE_PLUS_E_PHY_ID:
    /* IFE is a 10/100-only PHY: no gigabit advertisement */
    phy->type = e1000_phy_ife;
    phy->autoneg_mask = E1000_ALL_NOT_GIG;
    phy->ops.get_info = e1000_get_phy_info_ife;
    phy->ops.check_polarity = e1000_check_polarity_ife;
    phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
case BME1000_E_PHY_ID:
    phy->type = e1000_phy_bm;
    phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
    phy->ops.read_reg = e1000_read_phy_reg_bm;
    phy->ops.write_reg = e1000_write_phy_reg_bm;
    phy->ops.commit = e1000_phy_sw_reset_generic;
    phy->ops.get_info = e1000_get_phy_info_m88;
    phy->ops.check_polarity = e1000_check_polarity_m88;
    phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
    /* Unknown PHY ID is a hard error */
    ret_val = -E1000_ERR_PHY;
/**
 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific NVM parameters and function pointers.
 * The NVM lives in SPI flash behind the ICH flash controller; the flash
 * register window must already be mapped.
 *
 * NOTE(review): blank/brace/local-declaration lines (e.g. "u16 i;")
 * appear dropped from this listing; only comments were changed here.
 */
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u32 gfpreg, sector_base_addr, sector_end_addr;
s32 ret_val = E1000_SUCCESS;

DEBUGFUNC("e1000_init_nvm_params_ich8lan");

/* Can't read flash registers if the register set isn't mapped. */
if (!hw->flash_address) {
    DEBUGOUT("ERROR: Flash registers not mapped\n");
    ret_val = -E1000_ERR_CONFIG;

nvm->type = e1000_nvm_flash_sw;

/* GFPREG holds the GbE region boundaries in flash-sector units */
gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

/*
 * sector_X_addr is a "sector"-aligned address (4096 bytes)
 * Add 1 to sector_end_addr since this sector is included in
 * the overall size.
 */
sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

/* flash_base_addr is byte-aligned */
nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

/*
 * find total size of the NVM, then cut in half since the total
 * size represents two separate NVM banks.
 */
nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
                       << FLASH_SECTOR_ADDR_SHIFT;
nvm->flash_bank_size /= 2;
/* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16);

nvm->word_size = E1000_SHADOW_RAM_WORDS;

/* Clear shadow ram: every word starts unmodified/erased (0xFFFF) */
for (i = 0; i < nvm->word_size; i++) {
    dev_spec->shadow_ram[i].modified = FALSE;
    dev_spec->shadow_ram[i].value = 0xFFFF;

/* Serialize NVM and software-flag access */
E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

/* Function Pointers */
nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
nvm->ops.release = e1000_release_nvm_ich8lan;
nvm->ops.read = e1000_read_nvm_ich8lan;
nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
nvm->ops.write = e1000_write_nvm_ich8lan;
/**
 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific MAC parameters and function pointers, with
 * per-generation overrides for ICH8 vs PCH vs PCH2 parts.
 *
 * NOTE(review): blank/brace/case-label/local-declaration lines (e.g.
 * "u16 pci_cfg;") appear dropped from this listing; only comments were
 * changed here.
 */
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
struct e1000_mac_info *mac = &hw->mac;

DEBUGFUNC("e1000_init_mac_params_ich8lan");

/* Set media type function pointer */
hw->phy.media_type = e1000_media_type_copper;

/* Set mta register count */
mac->mta_reg_count = 32;
/* Set rar entry count */
mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
/* ICH8 has one less usable receive-address register */
if (mac->type == e1000_ich8lan)
    mac->rar_entry_count--;
/* Set if part includes ASF firmware */
mac->asf_firmware_present = TRUE;
/* FWSM register is present on this family */
mac->has_fwsm = TRUE;
/* ARC subsystem not supported */
mac->arc_subsystem_valid = FALSE;
/* Adaptive IFS supported */
mac->adaptive_ifs = TRUE;

/* Function pointers */

/* bus type/speed/width */
mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
/* single-port parts: function select via set_lan_id */
mac->ops.set_lan_id = e1000_set_lan_id_single_port;
/* reset */
mac->ops.reset_hw = e1000_reset_hw_ich8lan;
/* hw initialization */
mac->ops.init_hw = e1000_init_hw_ich8lan;
/* link setup */
mac->ops.setup_link = e1000_setup_link_ich8lan;
/* physical interface setup */
mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
/* check for link */
mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
/* link info */
mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
/* clear hardware counters */
mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

/* ICH8/9/10 overrides (enclosing switch header lost in extraction) */
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* blink LED */
mac->ops.blink_led = e1000_blink_led_generic;
/* setup LED */
mac->ops.setup_led = e1000_setup_led_generic;
/* cleanup LED */
mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
/* turn on/off LED */
mac->ops.led_on = e1000_led_on_ich8lan;
mac->ops.led_off = e1000_led_off_ich8lan;

/* PCH2 (82579) overrides */
mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
mac->ops.rar_set = e1000_rar_set_pch2lan;
/* multicast address update for pch2 */
mac->ops.update_mc_addr_list =
    e1000_update_mc_addr_list_pch2lan;

/* PCH/PCH2 common overrides */
/* save PCH revision_id */
e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
/* NOTE(review): "&=" assigns pci_cfg as a side effect inside the cast
 * expression; plain "& 0x000F" would be clearer — behavior identical
 * since pci_cfg is not used afterward. */
hw->revision_id = (u8)(pci_cfg &= 0x000F);
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_pchlan;
/* setup LED */
mac->ops.setup_led = e1000_setup_led_pchlan;
/* cleanup LED */
mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
/* turn on/off LED */
mac->ops.led_on = e1000_led_on_pchlan;
mac->ops.led_off = e1000_led_off_pchlan;

/* Enable PCS Lock-loss workaround for ICH8 */
if (mac->type == e1000_ich8lan)
    e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

/* Gate automatic PHY configuration by hardware on managed 82579 */
if ((mac->type == e1000_pch2lan) &&
    (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
    e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

return E1000_SUCCESS;
/**
 * e1000_set_eee_pchlan - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enable/disable EEE based on setting in dev_spec structure.  The bits in
 * the LPI Control register will remain set only if/when link is up.
 *
 * NOTE(review): blank/brace/"else"/local-declaration lines (e.g.
 * "u16 phy_reg;") appear dropped from this listing; only comments were
 * changed here.
 */
static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;

DEBUGFUNC("e1000_set_eee_pchlan");

/* EEE is only handled for the 82579 PHY here */
if (hw->phy.type != e1000_phy_82579)

ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);

/* Clear the enable bits to disable EEE, otherwise set them */
if (hw->dev_spec.ich8lan.eee_disable)
    phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
    phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;

ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
/**
 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see if the link status of the hardware has changed.  If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 *
 * NOTE(review): blank/brace/guard lines (e.g. "if (ret_val) goto out;",
 * "if (!link)") and local declarations appear dropped from this listing;
 * only comments were changed here.
 */
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
struct e1000_mac_info *mac = &hw->mac;

DEBUGFUNC("e1000_check_for_copper_link_ich8lan");

/*
 * We only want to go out to the PHY registers to see if Auto-Neg
 * has completed and/or if our link status has changed.  The
 * get_link_status flag is set upon receiving a Link Status
 * Change or Rx Sequence Error interrupt.
 */
if (!mac->get_link_status) {
    ret_val = E1000_SUCCESS;

/*
 * First we want to see if the MII Status Register reports
 * link.  If so, then we want to get the current speed/duplex
 * of the PHY.
 */
ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);

/* K1 must be disabled at 1Gbps on PCH - apply the workaround */
if (hw->mac.type == e1000_pchlan) {
    ret_val = e1000_k1_gig_workaround_hv(hw, link);

    goto out; /* No link detected */

mac->get_link_status = FALSE;

/* 82578 requires a stall workaround on link transitions */
if (hw->phy.type == e1000_phy_82578) {
    ret_val = e1000_link_stall_workaround_hv(hw);

/* 82579 K1 workaround */
if (hw->mac.type == e1000_pch2lan) {
    ret_val = e1000_k1_workaround_lv(hw);

/*
 * Check if there was DownShift, must be checked
 * immediately after link-up
 */
e1000_check_downshift_generic(hw);

/* Enable/Disable EEE after link up */
ret_val = e1000_set_eee_pchlan(hw);

/*
 * If we are forcing speed/duplex, then we simply return since
 * we have already determined whether we have link or not.
 */
    ret_val = -E1000_ERR_CONFIG;

/*
 * Auto-Neg is enabled.  Auto Speed Detection takes care
 * of MAC speed/duplex configuration.  So we only need to
 * configure Collision Distance in the MAC.
 */
e1000_config_collision_dist_generic(hw);

/*
 * Configure Flow Control now that Auto-Neg has completed.
 * First, we need to restore the desired flow control
 * settings because we may have had to re-autoneg with a
 * different link partner.
 */
ret_val = e1000_config_fc_after_link_up_generic(hw);
    DEBUGOUT("Error configuring flow control\n");
/**
 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific function pointers for PHY, MAC, and NVM.
 * ICH8/9/10 use the IGP/BM-style PHY init; PCH/PCH2 use the HV-style init.
 *
 * NOTE(review): case-label and brace lines of the switch appear dropped
 * from this listing; only comments were changed here.
 */
void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_init_function_pointers_ich8lan");

hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
switch (hw->mac.type) {
    /* ICH8/9/10 branch */
    hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
    /* PCH/PCH2 branch */
    hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
742 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
743 * @hw: pointer to the HW structure
745 * Acquires the mutex for performing NVM operations.
747 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
749 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
751 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
753 return E1000_SUCCESS;
757 * e1000_release_nvm_ich8lan - Release NVM mutex
758 * @hw: pointer to the HW structure
760 * Releases the mutex used while performing NVM operations.
762 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
764 DEBUGFUNC("e1000_release_nvm_ich8lan");
766 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
/**
 * e1000_acquire_swflag_ich8lan - Acquire software control flag
 * @hw: pointer to the HW structure
 *
 * Acquires the software control flag for performing PHY and select
 * MAC CSR accesses.  Takes the swflag mutex, then spins until the
 * EXTCNF_CTRL.SWFLAG bit is clear, claims it, and re-reads to confirm
 * hardware granted ownership.
 *
 * NOTE(review): the two polling loops' while/delay/timeout-decrement
 * lines appear dropped from this listing; only comments were changed.
 */
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
s32 ret_val = E1000_SUCCESS;

DEBUGFUNC("e1000_acquire_swflag_ich8lan");

E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);

/* Poll until no other agent holds the software flag */
extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))

/* Timed out waiting for the flag to be released */
DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
ret_val = -E1000_ERR_CONFIG;

/* Claim the flag, then poll to confirm hardware accepted it */
timeout = SW_FLAG_TIMEOUT;

extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);

extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)

/* Hardware refused the grab: undo the claim and report failure */
DEBUGOUT("Failed to acquire the semaphore.\n");
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
ret_val = -E1000_ERR_CONFIG;

/* On failure, drop the mutex so callers are not deadlocked */
E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
832 * e1000_release_swflag_ich8lan - Release software control flag
833 * @hw: pointer to the HW structure
835 * Releases the software control flag for performing PHY and select
838 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
842 DEBUGFUNC("e1000_release_swflag_ich8lan");
844 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
846 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
847 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
848 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
850 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
853 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
859 * e1000_check_mng_mode_ich8lan - Checks management mode
860 * @hw: pointer to the HW structure
862 * This checks if the adapter has any manageability enabled.
863 * This is a function pointer entry point only called by read/write
864 * routines for the PHY and NVM parts.
866 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
870 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
872 fwsm = E1000_READ_REG(hw, E1000_FWSM);
874 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
875 ((fwsm & E1000_FWSM_MODE_MASK) ==
876 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
880 * e1000_check_mng_mode_pchlan - Checks management mode
881 * @hw: pointer to the HW structure
883 * This checks if the adapter has iAMT enabled.
884 * This is a function pointer entry point only called by read/write
885 * routines for the PHY and NVM parts.
887 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
891 DEBUGFUNC("e1000_check_mng_mode_pchlan");
893 fwsm = E1000_READ_REG(hw, E1000_FWSM);
895 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
896 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
/**
 * e1000_rar_set_pch2lan - Set receive address register
 * @hw: pointer to the HW structure
 * @addr: pointer to the receive address
 * @index: receive address array register
 *
 * Sets the receive address array register at index to the address passed
 * in by addr.  For 82579, RAR[0] is the base address register that is to
 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 * Use SHRA[0-3] in place of those reserved for ME.
 *
 * NOTE(review): the "if (index == 0)"/"return;"/label lines appear dropped
 * from this listing; only comments were changed here.
 */
static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
u32 rar_low, rar_high;

DEBUGFUNC("e1000_rar_set_pch2lan");

/*
 * HW expects these in little endian so we reverse the byte order
 * from network order (big endian) to little endian
 */
rar_low = ((u32) addr[0] |
           ((u32) addr[1] << 8) |
           ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

/* If MAC address zero, no need to set the AV bit */
if (rar_low || rar_high)
    rar_high |= E1000_RAH_AV;

/* RAR[0] path: program the base receive-address registers directly.
 * Each write is flushed so RAL/RAH land in order. */
E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
E1000_WRITE_FLUSH(hw);

/* index >= 1: RAR[1-6] are reserved for ME, use SHRA[index-1] instead */
if (index < hw->mac.rar_entry_count) {
    E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
    E1000_WRITE_FLUSH(hw);
    E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
    E1000_WRITE_FLUSH(hw);

    /* verify the register updates */
    if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
        (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))

    /* Readback mismatch: ME may own this SHRA entry */
    DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
              (index - 1), E1000_READ_REG(hw, E1000_FWSM));

DEBUGOUT1("Failed to write receive address at index %d\n", index);
/**
 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @mc_addr_list: array of multicast addresses to program
 * @mc_addr_count: number of multicast addresses to program
 *
 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
 * The caller must have a packed mc_addr_list of multicast addresses.
 * The MAC MTA is updated via the generic helper, then the shadow copy is
 * mirrored into the PHY's BM_MTA registers (one 16-bit write per half of
 * each 32-bit MTA word).
 *
 * NOTE(review): the parameter-list continuation, loop-variable declaration
 * and the closing "0xFFFF));" line appear dropped from this listing; only
 * comments were changed here.
 */
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,

DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");

/* Program the MAC's MTA and refresh hw->mac.mta_shadow */
e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);

/* Mirror the shadow MTA into the PHY, low word then high word */
for (i = 0; i < hw->mac.mta_reg_count; i++) {
    hw->phy.ops.write_reg(hw, BM_MTA(i),
                          (u16)(hw->mac.mta_shadow[i] & 0xFFFF));
    hw->phy.ops.write_reg(hw, (BM_MTA(i) + 1),
                          (u16)((hw->mac.mta_shadow[i] >> 16) &
985 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
986 * @hw: pointer to the HW structure
988 * Checks if firmware is blocking the reset of the PHY.
989 * This is a function pointer entry point only called by
992 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
996 DEBUGFUNC("e1000_check_reset_block_ich8lan");
998 if (hw->phy.reset_disable)
999 return E1000_BLK_PHY_RESET;
1001 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1003 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1004 : E1000_BLK_PHY_RESET;
/**
 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
 * @hw: pointer to the HW structure
 *
 * Copies the SMBus address from the MAC STRAP register into the PHY's
 * HV_SMB_ADDR register, enabling PEC and marking the address valid.
 * Assumes semaphore already acquired.
 *
 * NOTE(review): the "u16 phy_data;" declaration and the error-check after
 * the read appear dropped from this listing; only comments were changed.
 */
static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
u32 strap = E1000_READ_REG(hw, E1000_STRAP);
s32 ret_val = E1000_SUCCESS;

/* Isolate the strapped SMBus address bits */
strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;

ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);

/* Replace the address field, keep other bits, enable PEC + valid */
phy_data &= ~HV_SMB_ADDR_MASK;
phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
/**
 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
 * @hw: pointer to the HW structure
 *
 * SW should configure the LCD from the NVM extended configuration region
 * as a workaround for certain parts.
 *
 * NOTE(review): blank/brace/case-label and several guard ("goto out")
 * lines appear dropped from this listing; only comments were changed.
 */
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
struct e1000_phy_info *phy = &hw->phy;
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
s32 ret_val = E1000_SUCCESS;
u16 word_addr, reg_data, reg_addr, phy_page = 0;

DEBUGFUNC("e1000_sw_lcd_config_ich8lan");

/*
 * Initialize the PHY from the NVM on ICH platforms.  This
 * is needed due to an issue where the NVM configuration is
 * not properly autoloaded after power transitions.
 * Therefore, after each PHY reset, we will load the
 * configuration data out of the NVM manually.
 */
switch (hw->mac.type) {
    /* ICH8 branch: only applies to IGP3 PHYs on specific device IDs */
    if (phy->type != e1000_phy_igp_3)

    if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
        (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
        sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;

    /* ICH8-M / PCH branch uses the mobile SW_CONFIG mask */
    sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;

ret_val = hw->phy.ops.acquire(hw);

/* Bail out unless the NVM asked for software configuration */
data = E1000_READ_REG(hw, E1000_FEXTNVM);
if (!(data & sw_cfg_mask))

/*
 * Make sure HW does not configure LCD from PHY
 * extended configuration before SW configuration
 */
data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
if (!(hw->mac.type == e1000_pch2lan)) {
    if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)

/* Extract size (in dwords) of the extended-config region */
cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;

/* Extract base pointer of the extended-config region */
cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
     (hw->mac.type == e1000_pchlan)) ||
    (hw->mac.type == e1000_pch2lan)) {
    /*
     * HW configures the SMBus address and LEDs when the
     * OEM and LCD Write Enable bits are set in the NVM.
     * When both NVM bits are cleared, SW will configure
     * them instead.
     */
    ret_val = e1000_write_smbus_addr(hw);

    /* Mirror the MAC LED configuration into the PHY */
    data = E1000_READ_REG(hw, E1000_LEDCTL);
    ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,

/* Configure LCD from extended configuration region. */

/* cnf_base_addr is in DWORD */
word_addr = (u16)(cnf_base_addr << 1);

/* Each entry is a (data, address) word pair read from NVM */
for (i = 0; i < cnf_size; i++) {
    ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,

    ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),

    /* Save off the PHY page for future writes. */
    if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
        phy_page = reg_data;

    /* Apply the saved page to the register offset before writing */
    reg_addr &= PHY_REG_MASK;
    reg_addr |= phy_page;

    ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,

hw->phy.ops.release(hw);
1161 * e1000_k1_gig_workaround_hv - K1 Si workaround
1162 * @hw: pointer to the HW structure
1163 * @link: link up bool flag
1165 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1166 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
1167 * If link is down, the function will restore the default K1 setting located
/*
 * NOTE(review): the statements executed when 1G speed is resolved (which
 * presumably clear k1_enable) and the error-check/return lines are elided
 * in this listing — confirm against the full source.
 */
1170 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1172 s32 ret_val = E1000_SUCCESS;
/* Default K1 state comes from what was read out of the NVM at init. */
1174 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1176 DEBUGFUNC("e1000_k1_gig_workaround_hv");
1178 if (hw->mac.type != e1000_pchlan)
1181 /* Wrap the whole flow with the sw flag */
1182 ret_val = hw->phy.ops.acquire(hw);
1186 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1188 if (hw->phy.type == e1000_phy_82578) {
1189 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
/* Mask down to link-up, resolution-done, and speed bits only. */
1194 status_reg &= BM_CS_STATUS_LINK_UP |
1195 BM_CS_STATUS_RESOLVED |
1196 BM_CS_STATUS_SPEED_MASK;
1198 if (status_reg == (BM_CS_STATUS_LINK_UP |
1199 BM_CS_STATUS_RESOLVED |
1200 BM_CS_STATUS_SPEED_1000))
/* 82577 reports the same information through HV_M_STATUS instead. */
1204 if (hw->phy.type == e1000_phy_82577) {
1205 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1210 status_reg &= HV_M_STATUS_LINK_UP |
1211 HV_M_STATUS_AUTONEG_COMPLETE |
1212 HV_M_STATUS_SPEED_MASK;
1214 if (status_reg == (HV_M_STATUS_LINK_UP |
1215 HV_M_STATUS_AUTONEG_COMPLETE |
1216 HV_M_STATUS_SPEED_1000))
1220 /* Link stall fix for link up */
1221 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1227 /* Link stall fix for link down */
1228 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1234 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1237 hw->phy.ops.release(hw);
1243 * e1000_configure_k1_ich8lan - Configure K1 power state
1244 * @hw: pointer to the HW structure
1245 * @enable: K1 state to configure
1247 * Configure the K1 power state based on the provided parameter.
1248 * Assumes semaphore already acquired.
1250 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1252 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1254 s32 ret_val = E1000_SUCCESS;
1260 DEBUGFUNC("e1000_configure_k1_ich8lan");
/* Read-modify-write the K1 enable bit in the KMRN config register. */
1262 ret_val = e1000_read_kmrn_reg_locked(hw,
1263 E1000_KMRNCTRLSTA_K1_CONFIG,
1269 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1271 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1273 ret_val = e1000_write_kmrn_reg_locked(hw,
1274 E1000_KMRNCTRLSTA_K1_CONFIG,
/*
 * Momentarily force the MAC speed (FRCSPD + SPD_BYPS) so the K1
 * change takes effect, then restore the saved CTRL/CTRL_EXT values.
 * The delay lines between the writes are elided in this listing.
 */
1280 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1281 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1283 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1284 reg |= E1000_CTRL_FRCSPD;
1285 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1287 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1289 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1290 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1298 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1299 * @hw: pointer to the HW structure
1300 * @d0_state: boolean if entering d0 or d3 device state
1302 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1303 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1304 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1306 s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1312 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
1314 if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1317 ret_val = hw->phy.ops.acquire(hw)
/* On pre-pch2lan parts, defer to HW when OEM Write Enable is set. */
1321 if (!(hw->mac.type == e1000_pch2lan)) {
1322 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1323 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1327 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1328 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1331 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1333 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1337 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
/*
 * Mirror the MAC's PHY_CTRL bits into the PHY OEM bits: the D0A
 * bits apply on entry to D0, the NOND0A bits on entry to D3.  The
 * if (d0_state) / else lines are elided in this listing.
 */
1340 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1341 oem_reg |= HV_OEM_BITS_GBE_DIS;
1343 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1344 oem_reg |= HV_OEM_BITS_LPLU;
1346 if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
1347 oem_reg |= HV_OEM_BITS_GBE_DIS;
1349 if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
1350 oem_reg |= HV_OEM_BITS_LPLU;
1352 /* Restart auto-neg to activate the bits */
1353 if (!hw->phy.ops.check_reset_block(hw))
1354 oem_reg |= HV_OEM_BITS_RESTART_AN;
1355 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1358 hw->phy.ops.release(hw);
1365 * e1000_hv_phy_powerdown_workaround_ich8lan - Power down workaround on Sx
1366 * @hw: pointer to the HW structure
/*
 * Applies only to early 82577 silicon (revision_id <= 2); a no-op
 * success for everything else.
 */
1368 s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
1370 DEBUGFUNC("e1000_hv_phy_powerdown_workaround_ich8lan");
1372 if ((hw->phy.type != e1000_phy_82577) || (hw->revision_id > 2))
1373 return E1000_SUCCESS;
/* Magic value into page 768 reg 25 per Si workaround — do not change. */
1375 return hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0444);
1379 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1380 * @hw: pointer to the HW structure
1382 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1387 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
/* Read-modify-write: only the slow-MDIO bit is added, rest preserved. */
1389 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1393 data |= HV_KMRN_MDIO_SLOW;
1395 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1401 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1402 * done after every PHY reset.
/*
 * NOTE(review): many error-check/return lines and closing braces are
 * elided in this listing; the register writes below are scripted Si
 * workaround values from the HW team and must be kept verbatim.
 */
1404 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1406 s32 ret_val = E1000_SUCCESS;
1409 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
1411 if (hw->mac.type != e1000_pchlan)
1414 /* Set MDIO slow mode before any other MDIO access */
1415 if (hw->phy.type == e1000_phy_82577) {
1416 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1421 /* Hanksville M Phy init for IEEE. */
1422 if ((hw->revision_id == 2) &&
1423 (hw->phy.type == e1000_phy_82577) &&
1424 ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
/* Address/data pairs: 0x10 selects the record, 0x11 carries the value. */
1425 hw->phy.ops.write_reg(hw, 0x10, 0x8823);
1426 hw->phy.ops.write_reg(hw, 0x11, 0x0018);
1427 hw->phy.ops.write_reg(hw, 0x10, 0x8824);
1428 hw->phy.ops.write_reg(hw, 0x11, 0x0016);
1429 hw->phy.ops.write_reg(hw, 0x10, 0x8825);
1430 hw->phy.ops.write_reg(hw, 0x11, 0x001A);
1431 hw->phy.ops.write_reg(hw, 0x10, 0x888C);
1432 hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1433 hw->phy.ops.write_reg(hw, 0x10, 0x888D);
1434 hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1435 hw->phy.ops.write_reg(hw, 0x10, 0x888E);
1436 hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1437 hw->phy.ops.write_reg(hw, 0x10, 0x8827);
1438 hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1439 hw->phy.ops.write_reg(hw, 0x10, 0x8835);
1440 hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1441 hw->phy.ops.write_reg(hw, 0x10, 0x8834);
1442 hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1443 hw->phy.ops.write_reg(hw, 0x10, 0x8833);
1444 hw->phy.ops.write_reg(hw, 0x11, 0x0002);
1447 if (((hw->phy.type == e1000_phy_82577) &&
1448 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1449 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1450 /* Disable generation of early preamble */
1451 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1455 /* Preamble tuning for SSC */
1456 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
1461 if (hw->phy.type == e1000_phy_82578) {
1462 if (hw->revision_id < 3) {
/* (1 << 6) selects PHY page 1; 0x29 / 0x1E are the register offsets. */
1464 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29,
1470 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E,
1477 * Return registers to default by doing a soft reset then
1478 * writing 0x3140 to the control register.
1480 if (hw->phy.revision < 2) {
1481 e1000_phy_sw_reset_generic(hw);
1482 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1487 if ((hw->revision_id == 2) &&
1488 (hw->phy.type == e1000_phy_82577) &&
1489 ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
1491 * Workaround for OEM (GbE) not operating after reset -
1492 * restart AN (twice)
1494 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
1497 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
/* Select page 0 via raw MDIC access; requires holding the PHY semaphore. */
1503 ret_val = hw->phy.ops.acquire(hw);
1508 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1509 hw->phy.ops.release(hw);
1514 * Configure the K1 Si workaround during phy reset assuming there is
1515 * link so that it disables K1 if link is in 1Gbps.
1517 ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
1521 /* Workaround for link disconnects on a busy hub in half duplex */
1522 ret_val = hw->phy.ops.acquire(hw);
1525 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG_REG,
1529 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG_REG,
1532 hw->phy.ops.release(hw);
1538 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1539 * @hw: pointer to the HW structure
1541 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1546 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1548 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1549 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
/* Low 32 bits of the address go to BM_RAR_L/M as two 16-bit halves. */
1550 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1551 hw->phy.ops.write_reg(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
1552 hw->phy.ops.write_reg(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
/* RAH: low half to BM_RAR_H; only bit 31 (Address Valid) to BM_RAR_CTRL. */
1553 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1554 hw->phy.ops.write_reg(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
1555 hw->phy.ops.write_reg(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
/*
 * Compute the 802.3 CRC-32 of a 6-byte destination MAC address.
 * NOTE(review): the crc initialization line and the per-byte XOR of
 * mac[i] into crc are elided in this listing (crc is conventionally
 * seeded with 0xFFFFFFFF) — confirm against the full source.
 */
1559 static u32 e1000_calc_rx_da_crc(u8 mac[])
1561 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
1562 u32 i, j, mask, crc;
1564 DEBUGFUNC("e1000_calc_rx_da_crc");
1567 for (i = 0; i < 6; i++) {
1569 for (j = 8; j > 0; j--) {
/* (crc & 1) * -1 yields all-ones when the LSB is set, 0 otherwise. */
1570 mask = (crc & 1) * (-1);
1571 crc = (crc >> 1) ^ (poly & mask);
1578 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1580 * @hw: pointer to the HW structure
1581 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
/*
 * NOTE(review): the if (enable) / else split and the error-check lines
 * are elided in this listing.  The first half programs the
 * workaround; the second half restores the hardware defaults.
 */
1583 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1585 s32 ret_val = E1000_SUCCESS;
1590 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
1592 if (hw->mac.type != e1000_pch2lan)
1595 /* disable Rx path while enabling/disabling workaround */
1596 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1597 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1603 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1604 * SHRAL/H) and initial CRC values to the MAC
1606 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1607 u8 mac_addr[ETH_ADDR_LEN] = {0};
1608 u32 addr_high, addr_low;
/* Skip receive-address entries that are not marked valid. */
1610 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
1611 if (!(addr_high & E1000_RAH_AV))
1613 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
1614 mac_addr[0] = (addr_low & 0xFF);
1615 mac_addr[1] = ((addr_low >> 8) & 0xFF);
1616 mac_addr[2] = ((addr_low >> 16) & 0xFF);
1617 mac_addr[3] = ((addr_low >> 24) & 0xFF);
1618 mac_addr[4] = (addr_high & 0xFF);
1619 mac_addr[5] = ((addr_high >> 8) & 0xFF);
/* Seed the per-entry Rx-address CRC register used by the workaround. */
1621 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1622 e1000_calc_rx_da_crc(mac_addr));
1625 /* Write Rx addresses to the PHY */
1626 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1628 /* Enable jumbo frame workaround in the MAC */
1629 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1630 mac_reg &= ~(1 << 14);
1631 mac_reg |= (7 << 15);
1632 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
/* Strip the Ethernet CRC on receive while the workaround is active. */
1634 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1635 mac_reg |= E1000_RCTL_SECRC;
1636 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1638 ret_val = e1000_read_kmrn_reg_generic(hw,
1639 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1643 ret_val = e1000_write_kmrn_reg_generic(hw,
1644 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1648 ret_val = e1000_read_kmrn_reg_generic(hw,
1649 E1000_KMRNCTRLSTA_HD_CTRL,
1653 data &= ~(0xF << 8);
1655 ret_val = e1000_write_kmrn_reg_generic(hw,
1656 E1000_KMRNCTRLSTA_HD_CTRL,
1661 /* Enable jumbo frame workaround in the PHY */
1662 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1663 data &= ~(0x7F << 5);
1664 data |= (0x37 << 5);
1665 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1668 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1670 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1673 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1674 data &= ~(0x3FF << 2);
1675 data |= (0x1A << 2);
1676 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1679 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xFE00);
1682 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1683 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | (1 << 10));
1687 /* Write MAC register values back to h/w defaults */
1688 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1689 mac_reg &= ~(0xF << 14);
1690 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1692 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1693 mac_reg &= ~E1000_RCTL_SECRC;
1694 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1696 ret_val = e1000_read_kmrn_reg_generic(hw,
1697 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1701 ret_val = e1000_write_kmrn_reg_generic(hw,
1702 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1706 ret_val = e1000_read_kmrn_reg_generic(hw,
1707 E1000_KMRNCTRLSTA_HD_CTRL,
1711 data &= ~(0xF << 8);
1713 ret_val = e1000_write_kmrn_reg_generic(hw,
1714 E1000_KMRNCTRLSTA_HD_CTRL,
1719 /* Write PHY register values back to h/w defaults */
1720 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1721 data &= ~(0x7F << 5);
1722 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1725 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1727 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1730 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1731 data &= ~(0x3FF << 2);
1733 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1736 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1739 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1740 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & ~(1 << 10));
1745 /* re-enable Rx path after enabling/disabling workaround */
1746 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1753 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1754 * done after every PHY reset.
/* 82579 (pch2lan) only; currently just forces slow MDIO mode. */
1756 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1758 s32 ret_val = E1000_SUCCESS;
1760 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
1762 if (hw->mac.type != e1000_pch2lan)
1765 /* Set MDIO slow mode before any other MDIO access */
1766 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1773 * e1000_k1_gig_workaround_lv - K1 Si workaround
1774 * @hw: pointer to the HW structure
1776 * Workaround to set the K1 beacon duration for 82579 parts
1778 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1780 s32 ret_val = E1000_SUCCESS;
1784 DEBUGFUNC("e1000_k1_workaround_lv");
1786 if (hw->mac.type != e1000_pch2lan)
1789 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1790 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
/* Only act once link is up and auto-negotiation has completed. */
1794 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1795 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1796 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1797 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
/* 8 usec beacon at 1Gbps, 16 usec at lower speeds. */
1799 if (status_reg & HV_M_STATUS_SPEED_1000)
1800 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1802 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1804 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1812 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1813 * @hw: pointer to the HW structure
1814 * @gate: boolean set to TRUE to gate, FALSE to ungate
1816 * Gate/ungate the automatic PHY configuration via hardware; perform
1817 * the configuration via software instead.
1819 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1823 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
1825 if (hw->mac.type != e1000_pch2lan)
/* Read-modify-write the single GATE_PHY_CFG bit in EXTCNF_CTRL. */
1828 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1831 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1833 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1835 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1840 * e1000_hv_phy_tuning_workaround_ich8lan - This is a Phy tuning work around
1841 * needed for Nahum3 + Hanksville testing, requested by HW team
/* Four scripted PHY writes; keep the magic values exactly as given. */
1843 static s32 e1000_hv_phy_tuning_workaround_ich8lan(struct e1000_hw *hw)
1845 s32 ret_val = E1000_SUCCESS;
1847 DEBUGFUNC("e1000_hv_phy_tuning_workaround_ich8lan");
1849 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1853 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
/* (1 << 6) selects PHY page 1. */
1857 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29, 0x66C0);
1861 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E, 0xFFFF);
1868 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1869 * @hw: pointer to the HW structure
1871 * Check the appropriate indication the MAC has finished configuring the
1872 * PHY after a software reset.
1874 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1876 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1878 DEBUGFUNC("e1000_lan_init_done_ich8lan");
1880 /* Wait for basic configuration completes before proceeding */
/*
 * Poll STATUS.LAN_INIT_DONE until set or the loop counter expires.
 * NOTE(review): the opening "do {" and the delay inside the loop are
 * elided in this listing.
 */
1882 data = E1000_READ_REG(hw, E1000_STATUS);
1883 data &= E1000_STATUS_LAN_INIT_DONE;
1885 } while ((!data) && --loop);
1888 * If basic configuration is incomplete before the above loop
1889 * count reaches 0, loading the configuration from NVM will
1890 * leave the PHY in a bad state possibly resulting in no link.
1893 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1895 /* Clear the Init Done bit for the next init event */
1896 data = E1000_READ_REG(hw, E1000_STATUS);
1897 data &= ~E1000_STATUS_LAN_INIT_DONE;
1898 E1000_WRITE_REG(hw, E1000_STATUS, data);
1902 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1903 * @hw: pointer to the HW structure
1905 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1907 s32 ret_val = E1000_SUCCESS;
1910 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
1912 if (hw->phy.ops.check_reset_block(hw))
1915 /* Allow time for h/w to get to quiescent state after reset */
1918 /* Perform any necessary post-reset workarounds */
/* NOTE(review): the case labels for this switch are elided in this listing. */
1919 switch (hw->mac.type) {
1921 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1926 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1934 if (hw->device_id == E1000_DEV_ID_ICH10_HANKSVILLE) {
1935 ret_val = e1000_hv_phy_tuning_workaround_ich8lan(hw);
1940 /* Dummy read to clear the phy wakeup bit after lcd reset */
1941 if (hw->mac.type >= e1000_pchlan)
/* NOTE(review): "®" below is mojibake for "&reg" (HTML &reg; entity
 * introduced by the extraction) — restore before compiling. */
1942 hw->phy.ops.read_reg(hw, BM_WUC, ®);
1944 /* Configure the LCD with the extended configuration region in NVM */
1945 ret_val = e1000_sw_lcd_config_ich8lan(hw);
1949 /* Configure the LCD with the OEM bits in NVM */
1950 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
1952 if (hw->mac.type == e1000_pch2lan) {
1953 /* Ungate automatic PHY configuration on non-managed 82579 */
1954 if (!(E1000_READ_REG(hw, E1000_FWSM) &
1955 E1000_ICH_FWSM_FW_VALID)) {
1957 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
1960 /* Set EEE LPI Update Timer to 200usec */
1961 ret_val = hw->phy.ops.acquire(hw);
/* EMI access is a two-step address/data sequence through I82579_EMI_*. */
1964 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1965 I82579_LPI_UPDATE_TIMER);
1968 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1971 hw->phy.ops.release(hw);
1979 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1980 * @hw: pointer to the HW structure
1983 * This is a function pointer entry point called by drivers
1984 * or other shared routines.
1986 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1988 s32 ret_val = E1000_SUCCESS;
1990 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
1992 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
1993 if ((hw->mac.type == e1000_pch2lan) &&
1994 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
1995 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
/* Generic reset first, then the ICH/PCH-specific post-reset sequence. */
1997 ret_val = e1000_phy_hw_reset_generic(hw);
2001 ret_val = e1000_post_phy_reset_ich8lan(hw);
2008 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2009 * @hw: pointer to the HW structure
2010 * @active: TRUE to enable LPLU, FALSE to disable
2012 * Sets the LPLU state according to the active flag. For PCH, if OEM write
2013 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2014 * the phy speed. This function will manually set the LPLU bit and restart
2015 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
2016 * since it configures the same bit.
2018 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2020 s32 ret_val = E1000_SUCCESS;
2023 DEBUGFUNC("e1000_set_lplu_state_pchlan");
/* Read-modify-write the LPLU bit in the PHY's OEM bits register. */
2025 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2030 oem_reg |= HV_OEM_BITS_LPLU;
2032 oem_reg &= ~HV_OEM_BITS_LPLU;
/* Restart auto-negotiation so the new LPLU setting takes effect. */
2034 oem_reg |= HV_OEM_BITS_RESTART_AN;
2035 ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2042 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2043 * @hw: pointer to the HW structure
2044 * @active: TRUE to enable LPLU, FALSE to disable
2046 * Sets the LPLU D0 state according to the active flag. When
2047 * activating LPLU this function also disables smart speed
2048 * and vice versa. LPLU will not be activated unless the
2049 * device autonegotiation advertisement meets standards of
2050 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2051 * This is a function pointer entry point only called by
2052 * PHY setup routines.
/* NOTE(review): the if (active) / else lines are elided in this listing. */
2054 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2056 struct e1000_phy_info *phy = &hw->phy;
2058 s32 ret_val = E1000_SUCCESS;
2061 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2063 if (phy->type == e1000_phy_ife)
2066 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2069 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2070 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2072 if (phy->type != e1000_phy_igp_3)
2076 * Call gig speed drop workaround on LPLU before accessing
2079 if (hw->mac.type == e1000_ich8lan)
2080 e1000_gig_downshift_workaround_ich8lan(hw);
2082 /* When LPLU is enabled, we should disable SmartSpeed */
2083 ret_val = phy->ops.read_reg(hw,
2084 IGP01E1000_PHY_PORT_CONFIG,
2086 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2087 ret_val = phy->ops.write_reg(hw,
2088 IGP01E1000_PHY_PORT_CONFIG,
/* Deactivating LPLU: clear the MAC bit, then re-apply SmartSpeed policy. */
2093 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2094 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2096 if (phy->type != e1000_phy_igp_3)
2100 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2101 * during Dx states where the power conservation is most
2102 * important. During driver activity we should enable
2103 * SmartSpeed, so performance is maintained.
2105 if (phy->smart_speed == e1000_smart_speed_on) {
2106 ret_val = phy->ops.read_reg(hw,
2107 IGP01E1000_PHY_PORT_CONFIG,
2112 data |= IGP01E1000_PSCFR_SMART_SPEED;
2113 ret_val = phy->ops.write_reg(hw,
2114 IGP01E1000_PHY_PORT_CONFIG,
2118 } else if (phy->smart_speed == e1000_smart_speed_off) {
2119 ret_val = phy->ops.read_reg(hw,
2120 IGP01E1000_PHY_PORT_CONFIG,
2125 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2126 ret_val = phy->ops.write_reg(hw,
2127 IGP01E1000_PHY_PORT_CONFIG,
2139 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2140 * @hw: pointer to the HW structure
2141 * @active: TRUE to enable LPLU, FALSE to disable
2143 * Sets the LPLU D3 state according to the active flag. When
2144 * activating LPLU this function also disables smart speed
2145 * and vice versa. LPLU will not be activated unless the
2146 * device autonegotiation advertisement meets standards of
2147 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2148 * This is a function pointer entry point only called by
2149 * PHY setup routines.
/* NOTE(review): the if (!active) line heading the first branch is elided. */
2151 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2153 struct e1000_phy_info *phy = &hw->phy;
2155 s32 ret_val = E1000_SUCCESS;
2158 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2160 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* LPLU off: clear the non-D0a LPLU bit and re-apply SmartSpeed policy. */
2163 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2164 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2166 if (phy->type != e1000_phy_igp_3)
2170 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2171 * during Dx states where the power conservation is most
2172 * important. During driver activity we should enable
2173 * SmartSpeed, so performance is maintained.
2175 if (phy->smart_speed == e1000_smart_speed_on) {
2176 ret_val = phy->ops.read_reg(hw,
2177 IGP01E1000_PHY_PORT_CONFIG,
2182 data |= IGP01E1000_PSCFR_SMART_SPEED;
2183 ret_val = phy->ops.write_reg(hw,
2184 IGP01E1000_PHY_PORT_CONFIG,
2188 } else if (phy->smart_speed == e1000_smart_speed_off) {
2189 ret_val = phy->ops.read_reg(hw,
2190 IGP01E1000_PHY_PORT_CONFIG,
2195 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2196 ret_val = phy->ops.write_reg(hw,
2197 IGP01E1000_PHY_PORT_CONFIG,
/* LPLU on: only when the advertisement includes all-speed variants. */
2202 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2203 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2204 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2205 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2206 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2208 if (phy->type != e1000_phy_igp_3)
2212 * Call gig speed drop workaround on LPLU before accessing
2215 if (hw->mac.type == e1000_ich8lan)
2216 e1000_gig_downshift_workaround_ich8lan(hw);
2218 /* When LPLU is enabled, we should disable SmartSpeed */
2219 ret_val = phy->ops.read_reg(hw,
2220 IGP01E1000_PHY_PORT_CONFIG,
2225 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2226 ret_val = phy->ops.write_reg(hw,
2227 IGP01E1000_PHY_PORT_CONFIG,
2236 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2237 * @hw: pointer to the HW structure
2238 * @bank: pointer to the variable that returns the active bank
2240 * Reads signature byte from the NVM using the flash access registers.
2241 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2243 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2246 struct e1000_nvm_info *nvm = &hw->nvm;
/* Bank 1 starts flash_bank_size words (i.e. *2 bytes) after bank 0. */
2247 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
/* Byte offset of the high byte of the signature word (0x13). */
2248 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2250 s32 ret_val = E1000_SUCCESS;
2252 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
/* Prefer the EECD SEC1VAL indication when the MAC provides a valid one. */
2254 switch (hw->mac.type) {
2257 eecd = E1000_READ_REG(hw, E1000_EECD);
2258 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2259 E1000_EECD_SEC1VAL_VALID_MASK) {
2260 if (eecd & E1000_EECD_SEC1VAL)
2267 DEBUGOUT("Unable to determine valid NVM bank via EEC - "
2268 "reading flash signature\n");
2271 /* set bank to 0 in case flash read fails */
/* Fall back to reading each bank's signature byte directly. */
2275 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2279 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2280 E1000_ICH_NVM_SIG_VALUE) {
2286 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2291 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2292 E1000_ICH_NVM_SIG_VALUE) {
2297 DEBUGOUT("ERROR: No valid NVM bank present\n");
2298 ret_val = -E1000_ERR_NVM;
2306 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2307 * @hw: pointer to the HW structure
2308 * @offset: The offset (in bytes) of the word(s) to read.
2309 * @words: Size of data to read in words
2310 * @data: Pointer to the word(s) to read at offset.
2312 * Reads a word(s) from the NVM using the flash access registers.
2314 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2317 struct e1000_nvm_info *nvm = &hw->nvm;
2318 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2320 s32 ret_val = E1000_SUCCESS;
2324 DEBUGFUNC("e1000_read_nvm_ich8lan");
/* Bounds-check the requested range against the NVM word size. */
2326 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2328 DEBUGOUT("nvm parameter(s) out of bounds\n");
2329 ret_val = -E1000_ERR_NVM;
2333 nvm->ops.acquire(hw);
2335 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2336 if (ret_val != E1000_SUCCESS) {
2337 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2341 act_offset = (bank) ? nvm->flash_bank_size : 0;
2342 act_offset += offset;
2344 ret_val = E1000_SUCCESS;
/* Shadow-RAM entries marked modified override the flash contents. */
2345 for (i = 0; i < words; i++) {
2346 if ((dev_spec->shadow_ram) &&
2347 (dev_spec->shadow_ram[offset+i].modified)) {
2348 data[i] = dev_spec->shadow_ram[offset+i].value;
2350 ret_val = e1000_read_flash_word_ich8lan(hw,
2359 nvm->ops.release(hw);
2363 DEBUGOUT1("NVM read error: %d\n", ret_val);
2369 * e1000_flash_cycle_init_ich8lan - Initialize flash
2370 * @hw: pointer to the HW structure
2372 * This function does initial flash setup so that a new read/write/erase cycle
2375 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2377 union ich8_hws_flash_status hsfsts;
/* Default to failure; set to success only on the paths below. */
2378 s32 ret_val = -E1000_ERR_NVM;
2380 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2382 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2384 /* Check if the flash descriptor is valid */
2385 if (hsfsts.hsf_status.fldesvalid == 0) {
2386 DEBUGOUT("Flash descriptor invalid. "
2387 "SW Sequencing must be used.");
2391 /* Clear FCERR and DAEL in hw status by writing 1 */
2392 hsfsts.hsf_status.flcerr = 1;
2393 hsfsts.hsf_status.dael = 1;
2395 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2398 * Either we should have a hardware SPI cycle in progress
2399 * bit to check against, in order to start a new cycle or
2400 * FDONE bit should be changed in the hardware so that it
2401 * is 1 after hardware reset, which can then be used as an
2402 * indication whether a cycle is in progress or has been
2406 if (hsfsts.hsf_status.flcinprog == 0) {
2408 * There is no cycle running at present,
2409 * so we can start a cycle.
2410 * Begin by setting Flash Cycle Done.
2412 hsfsts.hsf_status.flcdone = 1;
2413 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2414 ret_val = E1000_SUCCESS;
2419 * Otherwise poll for sometime so the current
2420 * cycle has a chance to end before giving up.
2422 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2423 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2425 if (hsfsts.hsf_status.flcinprog == 0) {
2426 ret_val = E1000_SUCCESS;
2431 if (ret_val == E1000_SUCCESS) {
2433 * Successful in waiting for previous cycle to timeout,
2434 * now set the Flash Cycle Done.
2436 hsfsts.hsf_status.flcdone = 1;
2437 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
2440 DEBUGOUT("Flash controller busy, cannot get access");
2449 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2450 * @hw: pointer to the HW structure
2451 * @timeout: maximum time to wait for completion
2453 * This function starts a flash cycle and waits for its completion.
2455 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2457 union ich8_hws_flash_ctrl hsflctl;
2458 union ich8_hws_flash_status hsfsts;
2459 s32 ret_val = -E1000_ERR_NVM;
2462 DEBUGFUNC("e1000_flash_cycle_ich8lan");
2464 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2465 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2466 hsflctl.hsf_ctrl.flcgo = 1;
2467 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2469 /* wait till FDONE bit is set to 1 */
2471 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2472 if (hsfsts.hsf_status.flcdone == 1)
2475 } while (i++ < timeout);
/* Success only if the cycle finished and no flash cycle error latched. */
2477 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2478 ret_val = E1000_SUCCESS;
2484 * e1000_read_flash_word_ich8lan - Read word from flash
2485 * @hw: pointer to the HW structure
2486 * @offset: offset to data location
2487 * @data: pointer to the location for storing the data
2489 * Reads the flash word at offset into data. Offset is converted
2490 * to bytes before read.
2492 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2497 DEBUGFUNC("e1000_read_flash_word_ich8lan");
/* NOTE(review): listing elides the offset bounds check guarding this error. */
2500 ret_val = -E1000_ERR_NVM;
2504 /* Must convert offset into bytes. */
/* Thin wrapper: a 2-byte read via the generic flash-data accessor. */
2507 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2514 * e1000_read_flash_byte_ich8lan - Read byte from flash
2515 * @hw: pointer to the HW structure
2516 * @offset: The offset of the byte to read.
2517 * @data: Pointer to a byte to store the value read.
2519 * Reads a single byte from the NVM using the flash access registers.
2521 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2524 s32 ret_val = E1000_SUCCESS;
/* 1-byte read lands in a u16 temp; the elided tail presumably narrows it
 * into *data — TODO confirm against full source. */
2527 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2538 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
2539 * @hw: pointer to the HW structure
2540 * @offset: The offset (in bytes) of the byte or word to read.
2541 * @size: Size of data to read, 1=byte 2=word
2542 * @data: Pointer to the word to store the value read.
2544 * Reads a byte or word from the NVM using the flash access registers.
2546 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2549 union ich8_hws_flash_status hsfsts;
2550 union ich8_hws_flash_ctrl hsflctl;
2551 u32 flash_linear_addr;
2553 s32 ret_val = -E1000_ERR_NVM;
2556 DEBUGFUNC("e1000_read_flash_data_ich8lan");
/* Reject anything other than a 1- or 2-byte read within the linear range. */
2558 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2561 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2562 hw->nvm.flash_base_addr;
/* Retry loop: each pass re-arms the controller and issues one read cycle.
 * NOTE(review): the do { opener is elided from this listing. */
2567 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2568 if (ret_val != E1000_SUCCESS)
2571 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2572 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2573 hsflctl.hsf_ctrl.fldbcount = size - 1;
2574 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2575 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2577 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2579 ret_val = e1000_flash_cycle_ich8lan(hw,
2580 ICH_FLASH_READ_COMMAND_TIMEOUT);
2583 * Check if FCERR is set to 1, if set to 1, clear it
2584 * and try the whole sequence a few more times, else
2585 * read in (shift in) the Flash Data0, the order is
2586 * least significant byte first msb to lsb
2588 if (ret_val == E1000_SUCCESS) {
2589 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
2591 *data = (u8)(flash_data & 0x000000FF);
2593 *data = (u16)(flash_data & 0x0000FFFF);
2597 * If we've gotten here, then things are probably
2598 * completely hosed, but if the error condition is
2599 * detected, it won't hurt to give it another try...
2600 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2602 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
/* FCERR set => transient flash error: loop again. FCDONE clear => the
 * cycle never finished: treat as a hard timeout and stop retrying. */
2604 if (hsfsts.hsf_status.flcerr == 1) {
2605 /* Repeat for some time before giving up. */
2607 } else if (hsfsts.hsf_status.flcdone == 0) {
2608 DEBUGOUT("Timeout error - flash cycle "
2609 "did not complete.");
2613 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2620 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2621 * @hw: pointer to the HW structure
2622 * @offset: The offset (in bytes) of the word(s) to write.
2623 * @words: Size of data to write in words
2624 * @data: Pointer to the word(s) to write at offset.
2626 * Writes a byte or word to the NVM using the flash access registers.
2628 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2631 struct e1000_nvm_info *nvm = &hw->nvm;
2632 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2633 s32 ret_val = E1000_SUCCESS;
2636 DEBUGFUNC("e1000_write_nvm_ich8lan");
/* Range-check offset and count against the NVM word size before touching
 * the shadow RAM. */
2638 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2640 DEBUGOUT("nvm parameter(s) out of bounds\n");
2641 ret_val = -E1000_ERR_NVM;
2645 nvm->ops.acquire(hw);
/* Writes only dirty the in-memory shadow RAM; the flash itself is not
 * touched until update_nvm_checksum commits the pending changes. */
2647 for (i = 0; i < words; i++) {
2648 dev_spec->shadow_ram[offset+i].modified = TRUE;
2649 dev_spec->shadow_ram[offset+i].value = data[i];
2652 nvm->ops.release(hw);
2659 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2660 * @hw: pointer to the HW structure
2662 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2663 * which writes the checksum to the shadow ram. The changes in the shadow
2664 * ram are then committed to the EEPROM by processing each bank at a time
2665 * checking for the modified bit and writing only the pending changes.
2666 * After a successful commit, the shadow ram is cleared and is ready for
2669 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2671 struct e1000_nvm_info *nvm = &hw->nvm;
2672 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2673 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2677 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
/* First let the generic code recompute the checksum into shadow RAM. */
2679 ret_val = e1000_update_nvm_checksum_generic(hw);
/* Only software-flash NVM types use the bank-swap commit below. */
2683 if (nvm->type != e1000_nvm_flash_sw)
2686 nvm->ops.acquire(hw);
2689 * We're writing to the opposite bank so if we're on bank 1,
2690 * write to bank 0 etc. We also need to erase the segment that
2691 * is going to be written
2693 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2694 if (ret_val != E1000_SUCCESS) {
2695 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* Bank 0 valid -> commit into bank 1 (and erase it), and vice versa. */
2700 new_bank_offset = nvm->flash_bank_size;
2701 old_bank_offset = 0;
2702 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2706 old_bank_offset = nvm->flash_bank_size;
2707 new_bank_offset = 0;
2708 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2713 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2715 * Determine whether to write the value stored
2716 * in the other NVM bank or a modified value stored
2719 if (dev_spec->shadow_ram[i].modified) {
2720 data = dev_spec->shadow_ram[i].value;
2722 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2730 * If the word is 0x13, then make sure the signature bits
2731 * (15:14) are 11b until the commit has completed.
2732 * This will allow us to write 10b which indicates the
2733 * signature is valid. We want to do this after the write
2734 * has completed so that we don't mark the segment valid
2735 * while the write is still in progress
2737 if (i == E1000_ICH_NVM_SIG_WORD)
2738 data |= E1000_ICH_NVM_SIG_MASK;
2740 /* Convert offset to bytes. */
2741 act_offset = (i + new_bank_offset) << 1;
2744 /* Write the bytes to the new bank. */
/* NOTE(review): each word is committed as two byte writes; the second
 * write's low/high-byte arguments are elided from this listing. */
2745 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2752 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2760 * Don't bother writing the segment valid bits if sector
2761 * programming failed.
2764 DEBUGOUT("Flash commit failed.\n");
2769 * Finally validate the new segment by setting bit 15:14
2770 * to 10b in word 0x13 , this can be done without an
2771 * erase as well since these bits are 11 to start with
2772 * and we need to change bit 14 to 0b
2774 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2775 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2780 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2787 * And invalidate the previously valid segment by setting
2788 * its signature word (0x13) high_byte to 0b. This can be
2789 * done without an erase because flash erase sets all bits
2790 * to 1's. We can write 1's to 0's without an erase
2792 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2793 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2797 /* Great! Everything worked, we can now clear the cached entries. */
2798 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2799 dev_spec->shadow_ram[i].modified = FALSE;
2800 dev_spec->shadow_ram[i].value = 0xFFFF;
2804 nvm->ops.release(hw);
2807 * Reload the EEPROM, or else modifications will not appear
2808 * until after the next adapter reset.
2811 nvm->ops.reload(hw);
2817 DEBUGOUT1("NVM update error: %d\n", ret_val);
2823 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2824 * @hw: pointer to the HW structure
2826 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2827 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
2828 * calculated, in which case we need to calculate the checksum and set bit 6.
2830 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2832 s32 ret_val = E1000_SUCCESS;
2835 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2838 * Read 0x19 and check bit 6. If this bit is 0, the checksum
2839 * needs to be fixed. This bit is an indication that the NVM
2840 * was prepared by OEM software and did not calculate the
2841 * checksum...a likely scenario.
2843 ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
/* Bit 6 clear: set it, write the word back, and commit via update() so
 * the generic validation below sees a consistent checksum. */
2847 if ((data & 0x40) == 0) {
2849 ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2852 ret_val = hw->nvm.ops.update(hw);
2857 ret_val = e1000_validate_nvm_checksum_generic(hw);
2864 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2865 * @hw: pointer to the HW structure
2866 * @offset: The offset (in bytes) of the byte/word to read.
2867 * @size: Size of data to read, 1=byte 2=word
2868 * @data: The byte(s) to write to the NVM.
2870 * Writes one/two bytes to the NVM using the flash access registers.
2872 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2875 union ich8_hws_flash_status hsfsts;
2876 union ich8_hws_flash_ctrl hsflctl;
2877 u32 flash_linear_addr;
2879 s32 ret_val = -E1000_ERR_NVM;
2882 DEBUGFUNC("e1000_write_ich8_data");
/* data must fit the declared size: <=0xFF for 1 byte, <=0x1FE for 2
 * (size * 0xff), and offset must be within the linear address range. */
2884 if (size < 1 || size > 2 || data > size * 0xff ||
2885 offset > ICH_FLASH_LINEAR_ADDR_MASK)
2888 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2889 hw->nvm.flash_base_addr;
/* Retry loop; the do { opener is elided from this listing. */
2894 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2895 if (ret_val != E1000_SUCCESS)
2898 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2899 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2900 hsflctl.hsf_ctrl.fldbcount = size - 1;
2901 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2902 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2904 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2907 flash_data = (u32)data & 0x00FF;
2909 flash_data = (u32)data;
2911 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2914 * check if FCERR is set to 1 , if set to 1, clear it
2915 * and try the whole sequence a few more times else done
2917 ret_val = e1000_flash_cycle_ich8lan(hw,
2918 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2919 if (ret_val == E1000_SUCCESS)
2923 * If we're here, then things are most likely
2924 * completely hosed, but if the error condition
2925 * is detected, it won't hurt to give it another
2926 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2928 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
/* FCERR => transient error, retry; FCDONE clear => hard timeout, stop. */
2929 if (hsfsts.hsf_status.flcerr == 1)
2930 /* Repeat for some time before giving up. */
2932 if (hsfsts.hsf_status.flcdone == 0) {
2933 DEBUGOUT("Timeout error - flash cycle "
2934 "did not complete.");
2937 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2944 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2945 * @hw: pointer to the HW structure
2946 * @offset: The index of the byte to read.
2947 * @data: The byte to write to the NVM.
2949 * Writes a single byte to the NVM using the flash access registers.
2951 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
/* Widen the byte to u16 because the underlying writer takes a word. */
2954 u16 word = (u16)data;
2956 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
2958 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2962 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2963 * @hw: pointer to the HW structure
2964 * @offset: The offset of the byte to write.
2965 * @byte: The byte to write to the NVM.
2967 * Writes a single byte to the NVM using the flash access registers.
2968 * Goes through a retry algorithm before giving up.
2970 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2971 u32 offset, u8 byte)
2974 u16 program_retries;
2976 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
/* Fast path: first attempt succeeds, no retries needed. */
2978 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2979 if (ret_val == E1000_SUCCESS)
/* Retry up to 100 times; the elided loop body presumably includes a
 * delay between attempts — TODO confirm against full source. */
2982 for (program_retries = 0; program_retries < 100; program_retries++) {
2983 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
2985 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2986 if (ret_val == E1000_SUCCESS)
/* All retries exhausted: report an NVM error. */
2989 if (program_retries == 100) {
2990 ret_val = -E1000_ERR_NVM;
2999 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3000 * @hw: pointer to the HW structure
3001 * @bank: 0 for first bank, 1 for second bank, etc.
3003 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3004 * bank N is 4096 * N + flash_reg_addr.
3006 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3008 struct e1000_nvm_info *nvm = &hw->nvm;
3009 union ich8_hws_flash_status hsfsts;
3010 union ich8_hws_flash_ctrl hsflctl;
3011 u32 flash_linear_addr;
3012 /* bank size is in 16bit words - adjust to bytes */
3013 u32 flash_bank_size = nvm->flash_bank_size * 2;
3014 s32 ret_val = E1000_SUCCESS;
3016 s32 j, iteration, sector_size;
3018 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3020 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3023 * Determine HW Sector size: Read BERASE bits of hw flash status
3025 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3026 * consecutive sectors. The start index for the nth Hw sector
3027 * can be calculated as = bank * 4096 + n * 256
3028 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3029 * The start index for the nth Hw sector can be calculated
3031 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3032 * (ich9 only, otherwise error condition)
3033 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3035 switch (hsfsts.hsf_status.berasesz) {
3037 /* Hw sector size 256 */
3038 sector_size = ICH_FLASH_SEG_SIZE_256;
3039 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
/* NOTE(review): the iteration assignments for the 4K/8K/64K cases are
 * elided from this listing. */
3042 sector_size = ICH_FLASH_SEG_SIZE_4K;
3046 sector_size = ICH_FLASH_SEG_SIZE_8K;
3050 sector_size = ICH_FLASH_SEG_SIZE_64K;
3054 ret_val = -E1000_ERR_NVM;
3058 /* Start with the base address, then add the sector offset. */
3059 flash_linear_addr = hw->nvm.flash_base_addr;
3060 flash_linear_addr += (bank) ? flash_bank_size : 0;
3062 for (j = 0; j < iteration ; j++) {
/* Inner retry loop per sector; the do { opener is elided. */
3065 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3070 * Write a value 11 (block Erase) in Flash
3071 * Cycle field in hw flash control
3073 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3075 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3076 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3080 * Write the last 24 bits of an index within the
3081 * block into Flash Linear address field in Flash
3084 flash_linear_addr += (j * sector_size);
3085 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3088 ret_val = e1000_flash_cycle_ich8lan(hw,
3089 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3090 if (ret_val == E1000_SUCCESS)
3094 * Check if FCERR is set to 1. If 1,
3095 * clear it and try the whole sequence
3096 * a few more times else Done
3098 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
/* FCERR => retry this sector; FCDONE clear => give up on it. */
3100 if (hsfsts.hsf_status.flcerr == 1)
3101 /* repeat for some time before giving up */
3103 else if (hsfsts.hsf_status.flcdone == 0)
3105 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3113 * e1000_valid_led_default_ich8lan - Set the default LED settings
3114 * @hw: pointer to the HW structure
3115 * @data: Pointer to the LED settings
3117 * Reads the LED default settings from the NVM to data. If the NVM LED
3118 * settings is all 0's or F's, set the LED default to a valid LED default
3121 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3125 DEBUGFUNC("e1000_valid_led_default_ich8lan");
3127 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3129 DEBUGOUT("NVM Read Error\n");
/* All-zeros/all-ones NVM values are reserved: substitute the ICH8LAN
 * default so callers always see a usable LED configuration. */
3133 if (*data == ID_LED_RESERVED_0000 ||
3134 *data == ID_LED_RESERVED_FFFF)
3135 *data = ID_LED_DEFAULT_ICH8LAN;
3142 * e1000_id_led_init_pchlan - store LED configurations
3143 * @hw: pointer to the HW structure
3145 * PCH does not control LEDs via the LEDCTL register, rather it uses
3146 * the PHY LED configuration register.
3148 * PCH also does not have an "always on" or "always off" mode which
3149 * complicates the ID feature. Instead of using the "on" mode to indicate
3150 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3151 * use "link_up" mode. The LEDs will still ID on request if there is no
3152 * link based on logic in e1000_led_[on|off]_pchlan().
3154 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3156 struct e1000_mac_info *mac = &hw->mac;
/* "on" = link-up mode; "off" = link-up mode with output inverted. */
3158 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3159 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3160 u16 data, i, temp, shift;
3162 DEBUGFUNC("e1000_id_led_init_pchlan");
3164 /* Get default ID LED modes */
3165 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3169 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3170 mac->ledctl_mode1 = mac->ledctl_default;
3171 mac->ledctl_mode2 = mac->ledctl_default;
/* Walk the four 4-bit LED mode fields packed in the NVM word; the
 * per-LED shift computation is elided from this listing. */
3173 for (i = 0; i < 4; i++) {
3174 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3177 case ID_LED_ON1_DEF2:
3178 case ID_LED_ON1_ON2:
3179 case ID_LED_ON1_OFF2:
3180 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3181 mac->ledctl_mode1 |= (ledctl_on << shift);
3183 case ID_LED_OFF1_DEF2:
3184 case ID_LED_OFF1_ON2:
3185 case ID_LED_OFF1_OFF2:
3186 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3187 mac->ledctl_mode1 |= (ledctl_off << shift);
/* Second switch: mode2 keys off the *2 half of each mode constant. */
3194 case ID_LED_DEF1_ON2:
3195 case ID_LED_ON1_ON2:
3196 case ID_LED_OFF1_ON2:
3197 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3198 mac->ledctl_mode2 |= (ledctl_on << shift);
3200 case ID_LED_DEF1_OFF2:
3201 case ID_LED_ON1_OFF2:
3202 case ID_LED_OFF1_OFF2:
3203 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3204 mac->ledctl_mode2 |= (ledctl_off << shift);
3217 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3218 * @hw: pointer to the HW structure
3220 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
3221 * register, so the bus width is hard coded.
3223 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3225 struct e1000_bus_info *bus = &hw->bus;
3228 DEBUGFUNC("e1000_get_bus_info_ich8lan");
3230 ret_val = e1000_get_bus_info_pcie_generic(hw)
3233 * ICH devices are "PCI Express"-ish. They have
3234 * a configuration space, but do not contain
3235 * PCI Express Capability registers, so bus width
3236 * must be hardcoded.
/* The generic PCIe probe leaves width unknown on ICH; force x1. */
3238 if (bus->width == e1000_bus_width_unknown)
3239 bus->width = e1000_bus_width_pcie_x1;
3245 * e1000_reset_hw_ich8lan - Reset the hardware
3246 * @hw: pointer to the HW structure
3248 * Does a full reset of the hardware which includes a reset of the PHY and
3251 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3253 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3258 DEBUGFUNC("e1000_reset_hw_ich8lan");
3261 * Prevent the PCI-E bus from sticking if there is no TLP connection
3262 * on the last TLP read/write transaction when MAC is reset.
3264 ret_val = e1000_disable_pcie_master_generic(hw);
/* Failure here is logged but not fatal; the reset proceeds regardless. */
3266 DEBUGOUT("PCI-E Master disable polling has failed.\n");
3268 DEBUGOUT("Masking off all interrupts\n");
3269 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3272 * Disable the Transmit and Receive units. Then delay to allow
3273 * any pending transactions to complete before we hit the MAC
3274 * with the global reset.
3276 E1000_WRITE_REG(hw, E1000_RCTL, 0);
3277 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3278 E1000_WRITE_FLUSH(hw);
3282 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3283 if (hw->mac.type == e1000_ich8lan) {
3284 /* Set Tx and Rx buffer allocation to 8k apiece. */
3285 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3286 /* Set Packet Buffer Size to 16k. */
3287 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3290 if (hw->mac.type == e1000_pchlan) {
3291 /* Save the NVM K1 bit setting*/
3292 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
3296 if (reg & E1000_NVM_K1_ENABLE)
3297 dev_spec->nvm_k1_enabled = TRUE;
3299 dev_spec->nvm_k1_enabled = FALSE;
3302 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3304 if (!hw->phy.ops.check_reset_block(hw)) {
3306 * Full-chip reset requires MAC and PHY reset at the same
3307 * time to make sure the interface between MAC and the
3308 * external PHY is reset.
3310 ctrl |= E1000_CTRL_PHY_RST;
3313 * Gate automatic PHY configuration by hardware on
3316 if ((hw->mac.type == e1000_pch2lan) &&
3317 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3318 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3320 ret_val = e1000_acquire_swflag_ich8lan(hw);
3321 DEBUGOUT("Issuing a global reset to ich8lan\n");
3322 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3326 e1000_release_swflag_ich8lan(hw);
/* If the PHY was reset together with the MAC, wait for its config to
 * complete and run the post-reset fixups. */
3328 if (ctrl & E1000_CTRL_PHY_RST) {
3329 ret_val = hw->phy.ops.get_cfg_done(hw);
3333 ret_val = e1000_post_phy_reset_ich8lan(hw);
3339 * For PCH, this write will make sure that any noise
3340 * will be detected as a CRC error and be dropped rather than show up
3341 * as a bad packet to the DMA engine.
3343 if (hw->mac.type == e1000_pchlan)
3344 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
/* Re-mask interrupts and clear any that fired during the reset. */
3346 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3347 E1000_READ_REG(hw, E1000_ICR);
3349 kab = E1000_READ_REG(hw, E1000_KABGTXD);
3350 kab |= E1000_KABGTXD_BGSQLBIAS;
3351 E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
3358 * e1000_init_hw_ich8lan - Initialize the hardware
3359 * @hw: pointer to the HW structure
3361 * Prepares the hardware for transmit and receive by doing the following:
3362 * - initialize hardware bits
3363 * - initialize LED identification
3364 * - setup receive address registers
3365 * - setup flow control
3366 * - setup transmit descriptors
3367 * - clear statistics
3369 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3371 struct e1000_mac_info *mac = &hw->mac;
3372 u32 ctrl_ext, txdctl, snoop;
3376 DEBUGFUNC("e1000_init_hw_ich8lan");
3378 e1000_initialize_hw_bits_ich8lan(hw);
3380 /* Initialize identification LED */
3381 ret_val = mac->ops.id_led_init(hw);
3383 DEBUGOUT("Error initializing identification LED\n");
3384 /* This is not fatal and we should not stop init due to this */
3386 /* Setup the receive address. */
3387 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3389 /* Zero out the Multicast HASH table */
3390 DEBUGOUT("Zeroing the MTA\n");
3391 for (i = 0; i < mac->mta_reg_count; i++)
3392 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3395 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3396 * the ME. Reading the BM_WUC register will clear the host wakeup bit.
3397 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3399 if (hw->phy.type == e1000_phy_82578) {
3400 hw->phy.ops.read_reg(hw, BM_WUC, &i);
3401 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3406 /* Setup link and flow control */
3407 ret_val = mac->ops.setup_link(hw);
3409 /* Set the transmit descriptor write-back policy for both queues */
/* Queue 0: full descriptor write-back, max descriptor prefetch. */
3410 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3411 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3412 E1000_TXDCTL_FULL_TX_DESC_WB;
3413 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3414 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3415 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
/* Queue 1: same policy as queue 0. */
3416 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3417 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3418 E1000_TXDCTL_FULL_TX_DESC_WB;
3419 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3420 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3421 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3424 * ICH8 has opposite polarity of no_snoop bits.
3425 * By default, we should use snoop behavior.
3427 if (mac->type == e1000_ich8lan)
3428 snoop = PCIE_ICH8_SNOOP_ALL;
3430 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3431 e1000_set_pcie_no_snoop_generic(hw, snoop);
3433 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3434 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3435 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3438 * Clear all of the statistics registers (clear on read). It is
3439 * important that we do this after we have tried to establish link
3440 * because the symbol error count will increment wildly if there
3443 e1000_clear_hw_cntrs_ich8lan(hw);
3448 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3449 * @hw: pointer to the HW structure
3451 * Sets/Clears required hardware bits necessary for correctly setting up the
3452 * hardware for transmit and receive.
3454 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3458 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3460 /* Extended Device Control */
3461 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3463 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3464 if (hw->mac.type >= e1000_pchlan)
3465 reg |= E1000_CTRL_EXT_PHYPDEN;
3466 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3468 /* Transmit Descriptor Control 0 */
/* NOTE(review): the bit set between read and write is elided here. */
3469 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3471 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3473 /* Transmit Descriptor Control 1 */
3474 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3476 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3478 /* Transmit Arbitration Control 0 */
3479 reg = E1000_READ_REG(hw, E1000_TARC(0));
3480 if (hw->mac.type == e1000_ich8lan)
3481 reg |= (1 << 28) | (1 << 29);
3482 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3483 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3485 /* Transmit Arbitration Control 1 */
3486 reg = E1000_READ_REG(hw, E1000_TARC(1));
3487 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3491 reg |= (1 << 24) | (1 << 26) | (1 << 30);
3492 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3495 if (hw->mac.type == e1000_ich8lan) {
3496 reg = E1000_READ_REG(hw, E1000_STATUS);
3498 E1000_WRITE_REG(hw, E1000_STATUS, reg);
3502 * work-around descriptor data corruption issue during nfs v2 udp
3503 * traffic, just disable the nfs filtering capability
3505 reg = E1000_READ_REG(hw, E1000_RFCTL);
3506 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3507 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3513 * e1000_setup_link_ich8lan - Setup flow control and link settings
3514 * @hw: pointer to the HW structure
3516 * Determines which flow control settings to use, then configures flow
3517 * control. Calls the appropriate media-specific link configuration
3518 * function. Assuming the adapter has a valid link partner, a valid link
3519 * should be established. Assumes the hardware has previously been reset
3520 * and the transmitter and receiver are not enabled.
3522 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3524 s32 ret_val = E1000_SUCCESS;
3526 DEBUGFUNC("e1000_setup_link_ich8lan");
/* If the PHY is reset-blocked (e.g. owned by manageability FW), skip. */
3528 if (hw->phy.ops.check_reset_block(hw))
3532 * ICH parts do not have a word in the NVM to determine
3533 * the default flow control setting, so we explicitly
3536 if (hw->fc.requested_mode == e1000_fc_default)
3537 hw->fc.requested_mode = e1000_fc_full;
3540 * Save off the requested flow control mode for use later. Depending
3541 * on the link partner's capabilities, we may or may not use this mode.
3543 hw->fc.current_mode = hw->fc.requested_mode;
3545 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3546 hw->fc.current_mode);
3548 /* Continue to configure the copper link. */
3549 ret_val = hw->mac.ops.setup_physical_interface(hw);
3553 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
/* PCH-class PHYs additionally need the FC refresh timer and a PHY-side
 * register write (argument elided in this listing). */
3554 if ((hw->phy.type == e1000_phy_82578) ||
3555 (hw->phy.type == e1000_phy_82579) ||
3556 (hw->phy.type == e1000_phy_82577)) {
3557 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3559 ret_val = hw->phy.ops.write_reg(hw,
3560 PHY_REG(BM_PORT_CTRL_PAGE, 27),
3566 ret_val = e1000_set_fc_watermarks_generic(hw);
3573 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3574 * @hw: pointer to the HW structure
3576 * Configures the kumeran interface to the PHY to wait the appropriate time
3577 * when polling the PHY, then call the generic setup_copper_link to finish
3578 * configuring the copper link.
3580 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3586 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
/* Set link up, and let speed/duplex come from autoneg (no forcing). */
3588 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3589 ctrl |= E1000_CTRL_SLU;
3590 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3591 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3594 * Set the mac to wait the maximum time between each iteration
3595 * and increase the max iterations when polling the phy;
3596 * this fixes erroneous timeouts at 10Mbps.
3598 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
/* Read-modify-write of the Kumeran inband-parameter register; the
 * modified bits are elided from this listing. */
3602 ret_val = e1000_read_kmrn_reg_generic(hw,
3603 E1000_KMRNCTRLSTA_INBAND_PARAM,
3608 ret_val = e1000_write_kmrn_reg_generic(hw,
3609 E1000_KMRNCTRLSTA_INBAND_PARAM,
/* Per-PHY setup before handing off to the generic copper-link code. */
3614 switch (hw->phy.type) {
3615 case e1000_phy_igp_3:
3616 ret_val = e1000_copper_link_setup_igp(hw);
3621 case e1000_phy_82578:
3622 ret_val = e1000_copper_link_setup_m88(hw);
3626 case e1000_phy_82577:
3627 case e1000_phy_82579:
3628 ret_val = e1000_copper_link_setup_82577(hw);
/* IFE PHY case (label elided): MDI/MDI-X is configured by hand. */
3633 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3638 reg_data &= ~IFE_PMC_AUTO_MDIX;
3640 switch (hw->phy.mdix) {
3642 reg_data &= ~IFE_PMC_FORCE_MDIX;
3645 reg_data |= IFE_PMC_FORCE_MDIX;
3649 reg_data |= IFE_PMC_AUTO_MDIX;
3652 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3660 ret_val = e1000_setup_copper_link_generic(hw);
3667 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3668 * @hw: pointer to the HW structure
3669 * @speed: pointer to store current link speed
3670 * @duplex: pointer to store the current link duplex
3672 * Calls the generic get_speed_and_duplex to retrieve the current link
3673 * information and then calls the Kumeran lock loss workaround for links at
3676 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3681 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3683 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
/* Kumeran lock-loss workaround applies only to ICH8 + IGP3 at 1 Gb/s. */
3687 if ((hw->mac.type == e1000_ich8lan) &&
3688 (hw->phy.type == e1000_phy_igp_3) &&
3689 (*speed == SPEED_1000)) {
3690 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3698 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3699 * @hw: pointer to the HW structure
3701 * Work-around for 82566 Kumeran PCS lock loss:
3702 * On link status change (i.e. PCI reset, speed change) and link is up and
3704 * 0) if workaround is optionally disabled do nothing
3705 * 1) wait 1ms for Kumeran link to come up
3706 * 2) check Kumeran Diagnostic register PCS lock loss bit
3707 * 3) if not set the link is locked (all is good), otherwise...
3709 * 5) repeat up to 10 times
3710 * Note: this is only called for IGP3 copper when speed is 1gb.
3712 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3714 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3716 s32 ret_val = E1000_SUCCESS;
3720 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
/* Opt-out switch set via e1000_set_kmrn_lock_loss_workaround_ich8lan(). */
3722 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3726 * Make sure link is up before proceeding. If not just return.
3727 * Attempting this while link is negotiating fouled up link
3730 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3732 ret_val = E1000_SUCCESS;
3736 for (i = 0; i < 10; i++) {
3737 /* read once to clear */
3738 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3741 /* and again to get new status */
3742 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3746 /* check for PCS lock */
3747 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
3748 ret_val = E1000_SUCCESS;
3752 /* Issue PHY reset */
3753 hw->phy.ops.reset(hw);
/* Fell out of the loop: PCS never locked. Disable gigabit in both D0
 * and non-D0a states and report a PHY error. */
3756 /* Disable GigE link negotiation */
3757 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3758 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3759 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3760 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3763 * Call gig speed drop workaround on Gig disable before accessing
3766 e1000_gig_downshift_workaround_ich8lan(hw);
3768 /* unable to acquire PCS lock */
3769 ret_val = -E1000_ERR_PHY;
3776 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3777 * @hw: pointer to the HW structure
3778 * @state: boolean value used to set the current Kumeran workaround state
3780 * If ICH8, set the current Kumeran workaround state (enabled - TRUE
3781 * /disabled - FALSE).
3783 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3786 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3788 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
/* Only ICH8 parts need the Kumeran PCS lock-loss workaround; any other
 * MAC type logs a message and leaves the stored state untouched. */
3790 if (hw->mac.type != e1000_ich8lan) {
3791 DEBUGOUT("Workaround applies to ICH8 only.\n");
/* Latched here; consumed by e1000_kmrn_lock_loss_workaround_ich8lan(). */
3795 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3801 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3802 * @hw: pointer to the HW structure
3804 * Workaround for 82566 power-down on D3 entry:
3805 * 1) disable gigabit link
3806 * 2) write VR power-down enable
/* NOTE(review): step 3 (read back and verify) is elided from this excerpt;
 * the readback/verify code is visible in the body below. */
3808 * Continue if successful, else issue LCD reset and repeat
3810 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3816 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
/* This workaround applies only to the IGP3 PHY. */
3818 if (hw->phy.type != e1000_phy_igp_3)
3821 /* Try the workaround twice (if needed) */
/* Disable gigabit in both D0a and non-D0a power states. */
3824 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3825 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3826 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3827 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3830 * Call gig speed drop workaround on Gig disable before
3831 * accessing any PHY registers
3833 if (hw->mac.type == e1000_ich8lan)
3834 e1000_gig_downshift_workaround_ich8lan(hw);
3836 /* Write VR power-down enable */
3837 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3838 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3839 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3840 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3842 /* Read it back and test */
3843 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3844 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
/* Stop when shutdown mode latched, or when the retry budget is spent. */
3845 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3848 /* Issue PHY reset and repeat at most one more time */
3849 reg = E1000_READ_REG(hw, E1000_CTRL);
3850 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3859 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3860 * @hw: pointer to the HW structure
3862 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
3863 * LPLU, Gig disable, MDIC PHY reset):
3864 * 1) Set Kumeran Near-end loopback
3865 * 2) Clear Kumeran Near-end loopback
3866 * Should only be called for ICH8[m] devices with IGP_3 Phy.
3868 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3870 s32 ret_val = E1000_SUCCESS;
3873 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
/* Guard: no-op unless this is an ICH8 MAC paired with the IGP3 PHY. */
3875 if ((hw->mac.type != e1000_ich8lan) ||
3876 (hw->phy.type != e1000_phy_igp_3))
3879 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
/* Pulse Kumeran near-end loopback: set the bit and write it out... */
3883 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3884 ret_val = e1000_write_kmrn_reg_generic(hw,
3885 E1000_KMRNCTRLSTA_DIAG_OFFSET,
/* ...then clear it again to complete the downshift sequence. */
3889 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3890 ret_val = e1000_write_kmrn_reg_generic(hw,
3891 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3898 * e1000_disable_gig_wol_ich8lan - disable gig during WoL
3899 * @hw: pointer to the HW structure
3901 * During S0 to Sx transition, it is possible the link remains at gig
3902 * instead of negotiating to a lower speed. Before going to Sx, set
3903 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
/* NOTE(review): the remainder of this doc paragraph is elided in the
 * excerpt. */
3906 * Should only be called for applicable parts.
3908 void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3913 DEBUGFUNC("e1000_disable_gig_wol_ich8lan");
/* Force a low-power link: enable D0a LPLU and disable gigabit. */
3915 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3916 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3917 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
/* PCH and later additionally program the OEM bits (disabled state) and
 * rewrite the SMBus address while holding the PHY semaphore. */
3919 if (hw->mac.type >= e1000_pchlan) {
3920 e1000_oem_bits_config_ich8lan(hw, FALSE);
3921 ret_val = hw->phy.ops.acquire(hw);
3924 e1000_write_smbus_addr(hw);
3925 hw->phy.ops.release(hw);
3932 * e1000_cleanup_led_ich8lan - Restore the default LED operation
3933 * @hw: pointer to the HW structure
3935 * Return the LED back to the default configuration.
3937 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3939 DEBUGFUNC("e1000_cleanup_led_ich8lan");
/* The IFE PHY drives its LEDs via a PHY register rather than LEDCTL. */
3941 if (hw->phy.type == e1000_phy_ife)
3942 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
/* All other PHYs: restore the saved MAC LEDCTL default value. */
3945 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
3946 return E1000_SUCCESS;
3950 * e1000_led_on_ich8lan - Turn LEDs on
3951 * @hw: pointer to the HW structure
3955 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3957 DEBUGFUNC("e1000_led_on_ich8lan");
/* IFE PHY: force LEDs on through the PHY probe-mode control register. */
3959 if (hw->phy.type == e1000_phy_ife)
3960 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3961 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
/* Other PHYs: program the MAC LEDCTL with the "on" (mode2) value. */
3963 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
3964 return E1000_SUCCESS;
3968 * e1000_led_off_ich8lan - Turn LEDs off
3969 * @hw: pointer to the HW structure
3971 * Turn off the LEDs.
3973 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3975 DEBUGFUNC("e1000_led_off_ich8lan");
/* IFE PHY: force LEDs off through the PHY probe-mode control register. */
3977 if (hw->phy.type == e1000_phy_ife)
3978 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3979 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
/* Other PHYs: program the MAC LEDCTL with the "off" (mode1) value. */
3981 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
3982 return E1000_SUCCESS;
3986 * e1000_setup_led_pchlan - Configures SW controllable LED
3987 * @hw: pointer to the HW structure
3989 * This prepares the SW controllable LED for use.
3991 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3993 DEBUGFUNC("e1000_setup_led_pchlan");
/* PCH LEDs are programmed through the PHY HV_LED_CONFIG register, not
 * the MAC LEDCTL register; load the mode1 ("setup") value. */
3995 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3996 (u16)hw->mac.ledctl_mode1);
4000 * e1000_cleanup_led_pchlan - Restore the default LED operation
4001 * @hw: pointer to the HW structure
4003 * Return the LED back to the default configuration.
4005 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4007 DEBUGFUNC("e1000_cleanup_led_pchlan");
/* Write the saved default LED configuration back to HV_LED_CONFIG. */
4009 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4010 (u16)hw->mac.ledctl_default);
4014 * e1000_led_on_pchlan - Turn LEDs on
4015 * @hw: pointer to the HW structure
4019 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4021 u16 data = (u16)hw->mac.ledctl_mode2;
4024 DEBUGFUNC("e1000_led_on_pchlan");
4027 * If no link, then turn LED on by setting the invert bit
4028 * for each LED that's mode is "link_up" in ledctl_mode2.
4030 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Each of the 3 LEDs occupies a 5-bit field of the config word. */
4031 for (i = 0; i < 3; i++) {
4032 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4033 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4034 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit so a "link up" LED lights while link is down. */
4036 if (led & E1000_PHY_LED0_IVRT)
4037 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4039 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4043 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4047 * e1000_led_off_pchlan - Turn LEDs off
4048 * @hw: pointer to the HW structure
4050 * Turn off the LEDs.
4052 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4054 u16 data = (u16)hw->mac.ledctl_mode1;
4057 DEBUGFUNC("e1000_led_off_pchlan");
4060 * If no link, then turn LED off by clearing the invert bit
4061 * for each LED that's mode is "link_up" in ledctl_mode1.
4063 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Each of the 3 LEDs occupies a 5-bit field of the config word. */
4064 for (i = 0; i < 3; i++) {
4065 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4066 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4067 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit so a "link up" LED stays dark while link is down. */
4069 if (led & E1000_PHY_LED0_IVRT)
4070 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4072 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4076 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4080 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4081 * @hw: pointer to the HW structure
4083 * Read appropriate register for the config done bit for completion status
4084 * and configure the PHY through s/w for EEPROM-less parts.
4086 * NOTE: some silicon which is EEPROM-less will fail trying to read the
4087 * config done bit, so only an error is logged and continues. If we were
4088 * to return with error, EEPROM-less silicon would not be able to be reset
4091 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4093 s32 ret_val = E1000_SUCCESS;
4097 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4099 e1000_get_cfg_done_generic(hw);
4101 /* Wait for indication from h/w that it has completed basic config */
4102 if (hw->mac.type >= e1000_ich10lan) {
4103 e1000_lan_init_done_ich8lan(hw);
4105 ret_val = e1000_get_auto_rd_done_generic(hw);
4108 * When auto config read does not complete, do not
4109 * return with an error. This can happen in situations
4110 * where there is no eeprom and prevents getting link.
4112 DEBUGOUT("Auto Read Done did not complete\n");
4113 ret_val = E1000_SUCCESS;
4117 /* Clear PHY Reset Asserted bit */
4118 status = E1000_READ_REG(hw, E1000_STATUS);
4119 if (status & E1000_STATUS_PHYRA)
4120 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
/* NOTE(review): an else branch is elided here; the message below fires
 * when PHYRA was not set. */
4122 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4124 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
4125 if (hw->mac.type <= e1000_ich9lan) {
4126 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
4127 (hw->phy.type == e1000_phy_igp_3)) {
4128 e1000_phy_init_script_igp3(hw);
/* Newer parts: a failed valid-NVM-bank detection stands in for the
 * EECD presence check above. */
4131 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4132 /* Maybe we should do a basic PHY config */
4133 DEBUGOUT("EEPROM not present\n");
4134 ret_val = -E1000_ERR_CONFIG;
4142 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4143 * @hw: pointer to the HW structure
4145 * In the case of a PHY power down to save power, or to turn off link during a
4146 * driver unload, or wake on lan is not enabled, remove the link.
4148 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4150 /* If the management interface is not enabled, then power down */
/* Power down only when neither manageability firmware nor a PHY reset
 * block is holding the PHY. */
4151 if (!(hw->mac.ops.check_mng_mode(hw) ||
4152 hw->phy.ops.check_reset_block(hw)))
4153 e1000_power_down_phy_copper(hw);
4159 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4160 * @hw: pointer to the HW structure
4162 * Clears hardware counters specific to the silicon family and calls
4163 * clear_hw_cntrs_generic to clear all general purpose counters.
4165 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4169 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4171 e1000_clear_hw_cntrs_base_generic(hw);
4173 E1000_READ_REG(hw, E1000_ALGNERRC);
4174 E1000_READ_REG(hw, E1000_RXERRC);
4175 E1000_READ_REG(hw, E1000_TNCRS);
4176 E1000_READ_REG(hw, E1000_CEXTERR);
4177 E1000_READ_REG(hw, E1000_TSCTC);
4178 E1000_READ_REG(hw, E1000_TSCTFC);
4180 E1000_READ_REG(hw, E1000_MGTPRC);
4181 E1000_READ_REG(hw, E1000_MGTPDC);
4182 E1000_READ_REG(hw, E1000_MGTPTC);
4184 E1000_READ_REG(hw, E1000_IAC);
4185 E1000_READ_REG(hw, E1000_ICRXOC);
4187 /* Clear PHY statistics registers */
4188 if ((hw->phy.type == e1000_phy_82578) ||
4189 (hw->phy.type == e1000_phy_82579) ||
4190 (hw->phy.type == e1000_phy_82577)) {
4191 hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
4192 hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
4193 hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
4194 hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
4195 hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
4196 hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
4197 hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
4198 hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
4199 hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
4200 hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
4201 hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
4202 hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
4203 hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
4204 hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);