1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright (c) 2020, Intel Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Intel Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 #include "ice_common.h"
/* ice_aq_read_nvm - read a section of the NVM over the AdminQ (opcode 0x0701).
 * NOTE(review): this listing is lossy (residual line numbers skip), so some
 * statements — opening brace, the early-return body, blank separators — are
 * absent below; verify against upstream ice_nvm.c before editing.
 */
37 * @hw: pointer to the HW struct
38 * @module_typeid: module pointer location in words from the NVM beginning
39 * @offset: byte offset from the module beginning
40 * @length: length of the section to be read (in bytes from the offset)
41 * @data: command buffer (size [bytes] = length)
42 * @last_command: tells if this is the last command in a series
43 * @read_shadow_ram: tell if this is a shadow RAM read
44 * @cd: pointer to command details structure or NULL
46 * Read the NVM using the admin queue commands (0x0701)
49 ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
50 void *data, bool last_command, bool read_shadow_ram,
53 struct ice_aq_desc desc;
54 struct ice_aqc_nvm *cmd;
56 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
58 cmd = &desc.params.nvm;
/* Reject offsets beyond the device's addressable NVM range.
 * (The return statement for this check is not visible in this capture.) */
60 if (offset > ICE_AQC_NVM_MAX_OFFSET)
63 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
/* A flat read of the default module (not Shadow RAM) targets flash only. */
65 if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
66 cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;
68 /* If this is the last command in a series, set the proper flag. */
70 cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
/* 24-bit offset is split: low 16 bits and the next 8 bits separately. */
71 cmd->module_typeid = CPU_TO_LE16(module_typeid);
72 cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF);
73 cmd->offset_high = (offset >> 16) & 0xFF;
74 cmd->length = CPU_TO_LE16(length);
76 return ice_aq_send_cmd(hw, &desc, data, length, cd);
/* NOTE(review): lossy capture — the loop header over bytes_read, the status
 * check after ice_aq_read_nvm, and the offset advance are missing below. */
80 * ice_read_flat_nvm - Read portion of NVM by flat offset
81 * @hw: pointer to the HW struct
82 * @offset: offset from beginning of NVM
83 * @length: (in) number of bytes to read; (out) number of bytes actually read
84 * @data: buffer to return data in (sized to fit the specified length)
85 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
87 * Reads a portion of the NVM, as a flat memory space. This function correctly
88 * breaks read requests across Shadow RAM sectors and ensures that no single
89 * read request exceeds the maximum 4KB read for a single AdminQ command.
91 * Returns a status code on failure. Note that the data pointer may be
92 * partially updated if some reads succeed before a failure.
95 ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
98 enum ice_status status;
103 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
107 /* Verify the length of the read if this is for the Shadow RAM */
108 if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
109 ice_debug(hw, ICE_DBG_NVM, "NVM error: requested data is beyond Shadow RAM limit\n");
110 return ICE_ERR_PARAM;
114 u32 read_size, sector_offset;
116 /* ice_aq_read_nvm cannot read more than 4KB at a time.
117 * Additionally, a read from the Shadow RAM may not cross over
118 * a sector boundary. Conveniently, the sector size is also
121 sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
122 read_size = MIN_T(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
/* last_cmd is true only on the iteration that completes the request. */
125 last_cmd = !(bytes_read + read_size < inlen);
127 /* ice_aq_read_nvm takes the length as a u16. Our read_size is
128 * calculated using a u32, but the ICE_AQ_MAX_BUF_LEN maximum
129 * size guarantees that it will fit within the 2 bytes.
131 status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
132 offset, (u16)read_size,
133 data + bytes_read, last_cmd,
134 read_shadow_ram, NULL);
138 bytes_read += read_size;
/* Report back how many bytes were actually read (may be < requested). */
142 *length = bytes_read;
/* ice_aq_update_nvm - write a section of the NVM over the AdminQ (0x0703).
 * NOTE(review): lossy capture — opening brace and the last_command `if`
 * condition line are not visible below. */
148 * @hw: pointer to the HW struct
149 * @module_typeid: module pointer location in words from the NVM beginning
150 * @offset: byte offset from the module beginning
151 * @length: length of the section to be written (in bytes from the offset)
152 * @data: command buffer (size [bytes] = length)
153 * @last_command: tells if this is the last command in a series
154 * @command_flags: command parameters
155 * @cd: pointer to command details structure or NULL
157 * Update the NVM using the admin queue commands (0x0703)
160 ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
161 u16 length, void *data, bool last_command, u8 command_flags,
162 struct ice_sq_cd *cd)
164 struct ice_aq_desc desc;
165 struct ice_aqc_nvm *cmd;
167 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
169 cmd = &desc.params.nvm;
171 /* In offset the highest byte must be zeroed. */
172 if (offset & 0xFF000000)
173 return ICE_ERR_PARAM;
175 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
177 cmd->cmd_flags |= command_flags;
179 /* If this is the last command in a series, set the proper flag. */
181 cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
182 cmd->module_typeid = CPU_TO_LE16(module_typeid);
183 cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF);
184 cmd->offset_high = (offset >> 16) & 0xFF;
185 cmd->length = CPU_TO_LE16(length);
/* ICE_AQ_FLAG_RD marks the buffer as host-to-device (a write payload). */
187 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
189 return ice_aq_send_cmd(hw, &desc, data, length, cd);
/* ice_aq_erase_nvm - erase one NVM sector over the AdminQ (opcode 0x0702).
 * NOTE(review): lossy capture — the offset_low assignment that presumably
 * pairs with offset_high below is not visible here. */
194 * @hw: pointer to the HW struct
195 * @module_typeid: module pointer location in words from the NVM beginning
196 * @cd: pointer to command details structure or NULL
198 * Erase the NVM sector using the admin queue commands (0x0702)
201 ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
203 struct ice_aq_desc desc;
204 struct ice_aqc_nvm *cmd;
206 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
208 cmd = &desc.params.nvm;
210 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);
212 cmd->module_typeid = CPU_TO_LE16(module_typeid);
/* Length is a fixed erase unit; erases always start at offset 0. */
213 cmd->length = CPU_TO_LE16(ICE_AQC_NVM_ERASE_LEN);
215 cmd->offset_high = 0;
/* No data buffer accompanies an erase command. */
217 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* NOTE(review): lossy capture — the final `return status;` of this function
 * is not visible in this listing. */
221 * ice_aq_read_nvm_cfg - read an NVM config block
222 * @hw: pointer to the HW struct
223 * @cmd_flags: NVM access admin command bits
224 * @field_id: field or feature ID
225 * @data: buffer for result
226 * @buf_size: buffer size
227 * @elem_count: pointer to count of elements read by FW
228 * @cd: pointer to command details structure or NULL
230 * Reads single or multiple feature/field ID and data (0x0704)
233 ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
234 u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd)
236 struct ice_aqc_nvm_cfg *cmd;
237 struct ice_aq_desc desc;
238 enum ice_status status;
240 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
242 cmd = &desc.params.nvm_cfg;
244 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_read);
246 cmd->cmd_flags = cmd_flags;
247 cmd->id = CPU_TO_LE16(field_id);
249 status = ice_aq_send_cmd(hw, &desc, data, buf_size, cd);
/* Element count is optional for the caller and valid only on success. */
250 if (!status && elem_count)
251 *elem_count = LE16_TO_CPU(cmd->count);
257 * ice_aq_write_nvm_cfg - write an NVM config block
258 * @hw: pointer to the HW struct
259 * @cmd_flags: NVM access admin command bits
260 * @data: buffer for result
261 * @buf_size: buffer size
262 * @elem_count: count of elements to be written
263 * @cd: pointer to command details structure or NULL
265 * Writes single or multiple feature/field ID and data (0x0705)
268 ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
269 u16 elem_count, struct ice_sq_cd *cd)
271 struct ice_aqc_nvm_cfg *cmd;
272 struct ice_aq_desc desc;
274 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
276 cmd = &desc.params.nvm_cfg;
278 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_write);
/* ICE_AQ_FLAG_RD marks the buffer as a host-to-device write payload. */
279 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
281 cmd->count = CPU_TO_LE16(elem_count);
282 cmd->cmd_flags = cmd_flags;
284 return ice_aq_send_cmd(hw, &desc, data, buf_size, cd);
/* NOTE(review): lossy capture — closing braces and the final success return
 * of this validator are not visible below. */
288 * ice_check_sr_access_params - verify params for Shadow RAM R/W operations.
289 * @hw: pointer to the HW structure
290 * @offset: offset in words from module start
291 * @words: number of words to access
293 static enum ice_status
294 ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
/* Request must end within the Shadow RAM. */
296 if ((offset + words) > hw->nvm.sr_words) {
297 ice_debug(hw, ICE_DBG_NVM, "NVM error: offset beyond SR lmt.\n");
298 return ICE_ERR_PARAM;
301 if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
302 /* We can access only up to 4KB (one sector), in one AQ write */
303 ice_debug(hw, ICE_DBG_NVM, "NVM error: tried to access %d words, limit is %d.\n",
304 words, ICE_SR_SECTOR_SIZE_IN_WORDS);
305 return ICE_ERR_PARAM;
/* First and last word must fall in the same sector. */
308 if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
309 (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
310 /* A single access cannot spread over two sectors */
311 ice_debug(hw, ICE_DBG_NVM, "NVM error: cannot spread over two sectors.\n");
312 return ICE_ERR_PARAM;
/* NOTE(review): lossy capture — the __le16 data_local declaration, the
 * status check, and the final return are not visible below. */
319 * ice_read_sr_word_aq - Reads Shadow RAM via AQ
320 * @hw: pointer to the HW structure
321 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
322 * @data: word read from the Shadow RAM
324 * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
326 enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
328 u32 bytes = sizeof(u16);
329 enum ice_status status;
332 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
334 /* Note that ice_read_flat_nvm checks if the read is past the Shadow
335 * RAM size, and ensures we don't read across a Shadow RAM sector
/* Word offset is converted to a byte offset for the flat read. */
338 status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
339 (_FORCE_ u8 *)&data_local, true);
/* Convert from NVM little-endian to host order before returning. */
343 *data = LE16_TO_CPU(data_local);
/* NOTE(review): lossy capture — the `if (!status)` guard before the update
 * call and the final return are not visible below. */
348 * ice_write_sr_aq - Writes Shadow RAM.
349 * @hw: pointer to the HW structure
350 * @offset: offset in words from module start
351 * @words: number of words to write
352 * @data: buffer with words to write to the Shadow RAM
353 * @last_command: tells the AdminQ that this is the last command
355 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
357 static enum ice_status
358 ice_write_sr_aq(struct ice_hw *hw, u32 offset, u16 words, __le16 *data,
361 enum ice_status status;
363 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Validate range/sector constraints before issuing the AQ write. */
365 status = ice_check_sr_access_params(hw, offset, words);
/* Word offset/count are doubled to bytes for the AQ command. */
367 status = ice_aq_update_nvm(hw, 0, 2 * offset, 2 * words, data,
368 last_command, 0, NULL);
/* NOTE(review): lossy capture — the *words writeback and final return are
 * not visible below; the byte-swap loop bound relies on that writeback. */
374 * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ
375 * @hw: pointer to the HW structure
376 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
377 * @words: (in) number of words to read; (out) number of words actually read
378 * @data: words read from the Shadow RAM
380 * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is
381 * taken before reading the buffer and later released.
383 static enum ice_status
384 ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
386 u32 bytes = *words * 2, i;
387 enum ice_status status;
389 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
391 /* ice_read_flat_nvm takes into account the 4KB AdminQ and Shadow RAM
392 * sector restrictions necessary when reading from the NVM.
394 status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
396 /* Report the number of words successfully read */
399 /* Byte swap the words up to the amount we actually read */
400 for (i = 0; i < *words; i++)
401 data[i] = LE16_TO_CPU(((_FORCE_ __le16 *)data)[i]);
/* NOTE(review): lossy capture — the early-return body for blank_nvm_mode is
 * not visible below (blank mode presumably needs no ownership; confirm). */
407 * ice_acquire_nvm - Generic request for acquiring the NVM ownership
408 * @hw: pointer to the HW structure
409 * @access: NVM access type (read or write)
411 * This function will request NVM ownership.
414 ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
416 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
418 if (hw->nvm.blank_nvm_mode)
/* Acquire the shared NVM resource with the standard timeout. */
421 return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
/* NOTE(review): lossy capture — the blank-mode early return body is absent. */
425 * ice_release_nvm - Generic request for releasing the NVM ownership
426 * @hw: pointer to the HW structure
428 * This function will release NVM ownership.
430 void ice_release_nvm(struct ice_hw *hw)
432 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
434 if (hw->nvm.blank_nvm_mode)
/* Must mirror a prior successful ice_acquire_nvm(). */
437 ice_release_res(hw, ICE_NVM_RES_ID);
/* NOTE(review): lossy capture — the acquire-status guard, the
 * ice_release_nvm() call, and the final return are not visible below. */
441 * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary
442 * @hw: pointer to the HW structure
443 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
444 * @data: word read from the Shadow RAM
446 * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
448 enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
450 enum ice_status status;
/* Take NVM ownership for the duration of the single-word read. */
452 status = ice_acquire_nvm(hw, ICE_RES_READ);
454 status = ice_read_sr_word_aq(hw, offset, data);
/* NOTE(review): lossy capture — local declarations (next_tlv, tlv_len),
 * several returns inside the error branches, and closing braces are not
 * visible below. */
462 * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
463 * @hw: pointer to hardware structure
464 * @module_tlv: pointer to module TLV to return
465 * @module_tlv_len: pointer to module TLV length to return
466 * @module_type: module type requested
468 * Finds the requested sub module TLV type from the Preserved Field
469 * Area (PFA) and returns the TLV pointer and length. The caller can
470 * use these to read the variable length TLV value.
473 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
476 enum ice_status status;
477 u16 pfa_len, pfa_ptr;
/* Locate the PFA via its pointer word in the Shadow RAM. */
480 status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
481 if (status != ICE_SUCCESS) {
482 ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
485 status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
486 if (status != ICE_SUCCESS) {
487 ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
490 /* Starting with first TLV after PFA length, iterate through the list
491 * of TLVs to find the requested one.
493 next_tlv = pfa_ptr + 1;
494 while (next_tlv < pfa_ptr + pfa_len) {
495 u16 tlv_sub_module_type;
/* Each TLV entry is: type word, length word, then `length` value words. */
499 status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
500 if (status != ICE_SUCCESS) {
501 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
504 /* Read TLV length */
505 status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
506 if (status != ICE_SUCCESS) {
507 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
510 if (tlv_sub_module_type == module_type) {
512 *module_tlv = next_tlv;
513 *module_tlv_len = tlv_len;
/* NOTE(review): a zero-length guard presumably precedes this error path. */
516 return ICE_ERR_INVAL_SIZE;
518 /* Check next TLV, i.e. current TLV pointer + length + 2 words
519 * (for current TLV's type and length)
521 next_tlv = next_tlv + tlv_len + 2;
523 /* Module does not exist */
524 return ICE_ERR_DOES_NOT_EXIST;
/* NOTE(review): lossy capture — the loop index declaration, the pba_size
 * decrement implied by the "Subtract one" comment, returns on error paths,
 * and the final return are not visible below. */
528 * ice_read_pba_string - Reads part number string from NVM
529 * @hw: pointer to hardware structure
530 * @pba_num: stores the part number string from the NVM
531 * @pba_num_size: part number string buffer length
533 * Reads the part number string from the NVM.
536 ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
538 u16 pba_tlv, pba_tlv_len;
539 enum ice_status status;
540 u16 pba_word, pba_size;
/* Locate the PBA block TLV inside the Preserved Field Area. */
543 status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
544 ICE_SR_PBA_BLOCK_PTR);
545 if (status != ICE_SUCCESS) {
546 ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
550 /* pba_size is the next word */
551 status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
552 if (status != ICE_SUCCESS) {
553 ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
557 if (pba_tlv_len < pba_size) {
558 ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
559 return ICE_ERR_INVAL_SIZE;
562 /* Subtract one to get PBA word count (PBA Size word is included in
/* Need 2 chars per word plus the terminating NUL. */
566 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
567 ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
568 return ICE_ERR_PARAM;
571 for (i = 0; i < pba_size; i++) {
572 status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
573 if (status != ICE_SUCCESS) {
574 ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
/* Big-endian unpack: high byte first, then low byte. */
578 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
579 pba_num[(i * 2) + 1] = pba_word & 0xFF;
581 pba_num[(pba_size * 2)] = '\0';
/* NOTE(review): lossy capture — combo_ver declaration, status guards after
 * the TLV/word reads, the second read's destination argument, the major
 * shift line, and the final return are not visible below. */
587 * ice_get_orom_ver_info - Read Option ROM version information
588 * @hw: pointer to the HW struct
590 * Read the Combo Image version data from the Boot Configuration TLV and fill
591 * in the option ROM version data.
593 static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
595 u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
596 struct ice_orom_info *orom = &hw->nvm.orom;
597 enum ice_status status;
600 status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
601 ICE_SR_BOOT_CFG_PTR);
603 ice_debug(hw, ICE_DBG_INIT, "Failed to read Boot Configuration Block TLV.\n");
607 /* Boot Configuration Block must have length at least 2 words
608 * (Combo Image Version High and Combo Image Version Low)
610 if (boot_cfg_tlv_len < 2) {
611 ice_debug(hw, ICE_DBG_INIT, "Invalid Boot Configuration Block TLV size.\n");
612 return ICE_ERR_INVAL_SIZE;
615 status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
618 ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
622 status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
625 ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
/* Combine the two 16-bit words into the 32-bit combo version. */
629 combo_ver = ((u32)combo_hi << 16) | combo_lo;
631 orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
633 orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
634 orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
635 ICE_OROM_VER_BUILD_SHIFT);
/* NOTE(review): lossy capture — local declarations (buff, raw_data, data,
 * id_blk_start, i, ret), status guards after each AQ read, the zero-length
 * early exit, cleanup (free buffer / release NVM), and the final return are
 * not visible below. */
641 * ice_get_netlist_ver_info
642 * @hw: pointer to the HW struct
644 * Get the netlist version information
646 enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
648 struct ice_netlist_ver_info *ver = &hw->netlist_ver;
/* NVM ownership is required for the raw module reads below. */
655 ret = ice_acquire_nvm(hw, ICE_RES_READ);
658 buff = (u16 *)ice_calloc(hw, ICE_AQC_NVM_NETLIST_ID_BLK_LEN,
661 ret = ICE_ERR_NO_MEMORY;
665 /* read module length */
666 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
667 ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
668 ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
673 data = LE16_TO_CPU(raw_data);
674 /* exit if length is = 0 */
678 /* read node count */
679 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
680 ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
681 ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
685 data = LE16_TO_CPU(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
687 /* netlist ID block starts from offset 4 + node count * 2 */
688 id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
690 /* read the entire netlist ID block */
691 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
693 ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
/* Convert the whole ID block from little-endian to host order in place. */
698 for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
699 buff[i] = LE16_TO_CPU(((_FORCE_ __le16 *)buff)[i]);
/* Each version field is assembled from a high and a low 16-bit word. */
701 ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
702 buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
703 ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
704 buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
705 ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
706 buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
707 ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
708 buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
709 ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
710 /* Read the left most 4 bytes of SHA */
711 ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
712 buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
/* NOTE(review): lossy capture — the per-iteration locals (len, data), the
 * max_size/min_size updates inside the branches, the error-path label, the
 * ice_release_nvm() call, and the final return are not visible below. */
722 * ice_discover_flash_size - Discover the available flash size.
723 * @hw: pointer to the HW struct
725 * The device flash could be up to 16MB in size. However, it is possible that
726 * the actual size is smaller. Use bisection to determine the accessible size
729 static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
731 u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
732 enum ice_status status;
734 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
736 status = ice_acquire_nvm(hw, ICE_RES_READ);
/* Binary search: probe the midpoint with a 1-byte flat read. */
740 while ((max_size - min_size) > 1) {
741 u32 offset = (max_size + min_size) / 2;
745 status = ice_read_flat_nvm(hw, offset, &len, &data, false);
/* FW rejecting the offset (EINVAL) means the flash ends below it. */
746 if (status == ICE_ERR_AQ_ERROR &&
747 hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
748 ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
750 status = ICE_SUCCESS;
752 } else if (!status) {
753 ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n",
757 /* an unexpected error occurred */
758 goto err_read_flat_nvm;
762 ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size);
764 hw->nvm.flash_size = max_size;
/* NOTE(review): lossy capture — locals (gens_stat, sr_size, fla), status
 * guards after several reads, the else keyword of the FLA branch, and the
 * final success return are not visible below. */
773 * ice_init_nvm - initializes NVM setting
774 * @hw: pointer to the HW struct
776 * This function reads and populates NVM settings such as Shadow RAM size,
777 * max_timeout, and blank_nvm_mode
779 enum ice_status ice_init_nvm(struct ice_hw *hw)
781 struct ice_nvm_info *nvm = &hw->nvm;
782 u16 eetrack_lo, eetrack_hi, ver;
783 enum ice_status status;
787 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
789 /* The SR size is stored regardless of the NVM programming mode
790 * as the blank mode may be used in the factory line.
792 gens_stat = rd32(hw, GLNVM_GENS);
793 sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
795 /* Switching to words (sr_size contains power of 2) */
796 nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
798 /* Check if we are in the normal or blank NVM programming mode */
799 fla = rd32(hw, GLNVM_FLA);
800 if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
801 nvm->blank_nvm_mode = false;
803 /* Blank programming mode */
804 nvm->blank_nvm_mode = true;
/* Blank (unprogrammed) parts cannot be initialized further. */
805 ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
806 return ICE_ERR_NVM_BLANK_MODE;
809 status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
811 ice_debug(hw, ICE_DBG_INIT, "Failed to read DEV starter version.\n");
/* NVM map version is packed into one word: high nibble = major. */
814 nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
815 nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
817 status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
819 ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
822 status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
824 ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
828 nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
830 status = ice_discover_flash_size(hw);
832 ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n");
836 status = ice_get_orom_ver_info(hw);
838 ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
842 /* read the netlist version information */
843 status = ice_get_netlist_ver_info(hw);
845 ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
/* NOTE(review): lossy capture — the acquire-status guard, the
 * ice_release_nvm() call, and the final return are not visible below. */
850 * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary
851 * @hw: pointer to the HW structure
852 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
853 * @words: (in) number of words to read; (out) number of words actually read
854 * @data: words read from the Shadow RAM
856 * Reads 16 bit words (data buf) from the SR using the ice_read_nvm_buf_aq
857 * method. The buf read is preceded by the NVM ownership take
858 * and followed by the release.
861 ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
863 enum ice_status status;
/* Wrap the raw buffer read in NVM acquire/release. */
865 status = ice_acquire_nvm(hw, ICE_RES_READ);
867 status = ice_read_sr_buf_aq(hw, offset, words, data);
875 * __ice_write_sr_word - Writes Shadow RAM word
876 * @hw: pointer to the HW structure
877 * @offset: offset of the Shadow RAM word to write
878 * @data: word to write to the Shadow RAM
880 * Writes a 16 bit word to the SR using the ice_write_sr_aq method.
881 * NVM ownership have to be acquired and released (on ARQ completion event
882 * reception) by caller. To commit SR to NVM update checksum function
886 __ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data)
/* Convert to NVM little-endian order before handing off to the AQ. */
888 __le16 data_local = CPU_TO_LE16(*data);
890 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
892 /* Value 0x00 below means that we treat SR as a flat mem */
/* One word, not the last command in a series. */
893 return ice_write_sr_aq(hw, offset, 1, &data_local, false);
/* NOTE(review): lossy capture — locals (vmem, data_local, i), the NULL check
 * on the allocation, the ice_free() of vmem, and the final return are not
 * visible below. */
897 * __ice_write_sr_buf - Writes Shadow RAM buf
898 * @hw: pointer to the HW structure
899 * @offset: offset of the Shadow RAM buffer to write
900 * @words: number of words to write
901 * @data: words to write to the Shadow RAM
903 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
904 * NVM ownership must be acquired before calling this function and released
905 * on ARQ completion event reception by caller. To commit SR to NVM update
906 * checksum function should be called.
909 __ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data)
911 enum ice_status status;
916 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Stage a little-endian copy so the caller's buffer is left untouched. */
918 vmem = ice_calloc(hw, words, sizeof(u16));
920 return ICE_ERR_NO_MEMORY;
921 data_local = (_FORCE_ __le16 *)vmem;
923 for (i = 0; i < words; i++)
924 data_local[i] = CPU_TO_LE16(data[i]);
926 /* Here we will only write one buffer as the size of the modules
927 * mirrored in the Shadow RAM is always less than 4K.
929 status = ice_write_sr_aq(hw, offset, words, data_local, false);
/* NOTE(review): lossy capture — locals (vmem, data, vpd_module, i), the NULL
 * check on the allocation, `continue` statements for the skip branches, the
 * buffer free at the exit label, and the final return are not visible below. */
937 * ice_calc_sr_checksum - Calculates and returns Shadow RAM SW checksum
938 * @hw: pointer to hardware structure
939 * @checksum: pointer to the checksum
941 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
942 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
943 * is customer specific and unknown. Therefore, this function skips all maximum
944 * possible size of VPD (1kB).
946 static enum ice_status ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
948 enum ice_status status = ICE_SUCCESS;
949 u16 pcie_alt_module = 0;
950 u16 checksum_local = 0;
956 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* One sector worth of scratch space for the chunked reads below. */
958 vmem = ice_calloc(hw, ICE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
960 return ICE_ERR_NO_MEMORY;
963 /* read pointer to VPD area */
964 status = ice_read_sr_word_aq(hw, ICE_SR_VPD_PTR, &vpd_module);
966 goto ice_calc_sr_checksum_exit;
968 /* read pointer to PCIe Alt Auto-load module */
969 status = ice_read_sr_word_aq(hw, ICE_SR_PCIE_ALT_AUTO_LOAD_PTR,
972 goto ice_calc_sr_checksum_exit;
974 /* Calculate SW checksum that covers the whole 64kB shadow RAM
975 * except the VPD and PCIe ALT Auto-load modules
977 for (i = 0; i < hw->nvm.sr_words; i++) {
/* Refill the scratch buffer at every sector boundary. */
979 if ((i % ICE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
980 u16 words = ICE_SR_SECTOR_SIZE_IN_WORDS;
982 status = ice_read_sr_buf_aq(hw, i, &words, data);
983 if (status != ICE_SUCCESS)
984 goto ice_calc_sr_checksum_exit;
987 /* Skip Checksum word */
988 if (i == ICE_SR_SW_CHECKSUM_WORD)
990 /* Skip VPD module (convert byte size to word count) */
991 if (i >= (u32)vpd_module &&
992 i < ((u32)vpd_module + ICE_SR_VPD_SIZE_WORDS))
994 /* Skip PCIe ALT module (convert byte size to word count) */
995 if (i >= (u32)pcie_alt_module &&
996 i < ((u32)pcie_alt_module + ICE_SR_PCIE_ALT_SIZE_WORDS))
999 checksum_local += data[i % ICE_SR_SECTOR_SIZE_IN_WORDS];
/* Checksum is the complement that makes the SR sum to the fixed base. */
1002 *checksum = (u16)ICE_SR_SW_CHECKSUM_BASE - checksum_local;
1004 ice_calc_sr_checksum_exit:
/* NOTE(review): lossy capture — locals (checksum, le_sum), the status guard
 * after calculation, the last_command argument of the write, and the final
 * return are not visible below. */
1010 * ice_update_sr_checksum - Updates the Shadow RAM SW checksum
1011 * @hw: pointer to hardware structure
1013 * NVM ownership must be acquired before calling this function and released
1014 * on ARQ completion event reception by caller.
1015 * This function will commit SR to NVM.
1017 enum ice_status ice_update_sr_checksum(struct ice_hw *hw)
1019 enum ice_status status;
1023 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1025 status = ice_calc_sr_checksum(hw, &checksum);
/* Store the freshly calculated checksum in its dedicated SR word. */
1027 le_sum = CPU_TO_LE16(checksum);
1028 status = ice_write_sr_aq(hw, ICE_SR_SW_CHECKSUM_WORD, 1,
/* NOTE(review): lossy capture — locals (checksum_local, checksum_sr), status
 * guards, the NULL check on the optional out-parameter, and the final return
 * are not visible below. */
1035 * ice_validate_sr_checksum - Validate Shadow RAM SW checksum
1036 * @hw: pointer to hardware structure
1037 * @checksum: calculated checksum
1039 * Performs checksum calculation and validates the Shadow RAM SW checksum.
1040 * If the caller does not need checksum, the value can be NULL.
1042 enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum)
1044 enum ice_status status;
1048 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1050 status = ice_acquire_nvm(hw, ICE_RES_READ);
1052 status = ice_calc_sr_checksum(hw, &checksum_local);
1053 ice_release_nvm(hw);
/* Fetch the stored checksum word for comparison with the recomputation. */
1060 ice_read_sr_word(hw, ICE_SR_SW_CHECKSUM_WORD, &checksum_sr);
1062 /* Verify read checksum from EEPROM is the same as
1063 * calculated checksum
1065 if (checksum_local != checksum_sr)
1066 status = ICE_ERR_NVM_CHECKSUM;
1068 /* If the user cares, return the calculated checksum */
1070 *checksum = checksum_local;
/* NOTE(review): lossy capture — the acquire-status guard, the `if (!status)`
 * wrapper around the checksum comparison, and the final return are not
 * visible below. */
1076 * ice_nvm_validate_checksum
1077 * @hw: pointer to the HW struct
1079 * Verify NVM PFA checksum validity (0x0706)
1081 enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
1083 struct ice_aqc_nvm_checksum *cmd;
1084 struct ice_aq_desc desc;
1085 enum ice_status status;
1087 status = ice_acquire_nvm(hw, ICE_RES_READ);
1091 cmd = &desc.params.nvm_checksum;
1093 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
/* Ask FW to verify (rather than recalculate) the PFA checksum. */
1094 cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
1096 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1097 ice_release_nvm(hw);
/* FW reports the result in the descriptor's checksum field. */
1100 if (LE16_TO_CPU(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
1101 status = ICE_ERR_NVM_CHECKSUM;
/* NOTE(review): lossy capture — the final success return is not visible. */
1107 * ice_nvm_access_get_features - Return the NVM access features structure
1108 * @cmd: NVM access command to process
1109 * @data: storage for the driver NVM features
1111 * Fill in the data section of the NVM access request with a copy of the NVM
1112 * features structure.
1115 ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
1116 union ice_nvm_access_data *data)
1118 /* The provided data_size must be at least as large as our NVM
1119 * features structure. A larger size should not be treated as an
1120 * error, to allow future extensions to the features structure to
1121 * work on older drivers.
1123 if (cmd->data_size < sizeof(struct ice_nvm_features))
1124 return ICE_ERR_NO_MEMORY;
1126 /* Initialize the data buffer to zeros */
1127 ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM);
1129 /* Fill in the features data */
1130 data->drv_features.major = ICE_NVM_ACCESS_MAJOR_VER;
1131 data->drv_features.minor = ICE_NVM_ACCESS_MINOR_VER;
1132 data->drv_features.size = sizeof(struct ice_nvm_features);
1133 data->drv_features.features[0] = ICE_NVM_FEATURES_0_REG_ACCESS;
1139 * ice_nvm_access_get_module - Helper function to read module value
1140 * @cmd: NVM access command structure
1142 * Reads the module value out of the NVM access config field.
1144 u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd)
/* Extract the module bit-field from the packed config word. */
1146 return ((cmd->config & ICE_NVM_CFG_MODULE_M) >> ICE_NVM_CFG_MODULE_S);
1150 * ice_nvm_access_get_flags - Helper function to read flags value
1151 * @cmd: NVM access command structure
1153 * Reads the flags value out of the NVM access config field.
1155 u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd)
/* Extract the flags bit-field from the packed config word. */
1157 return ((cmd->config & ICE_NVM_CFG_FLAGS_M) >> ICE_NVM_CFG_FLAGS_S);
1161 * ice_nvm_access_get_adapter - Helper function to read adapter info
1162 * @cmd: NVM access command structure
1164 * Read the adapter info value out of the NVM access config field.
1166 u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd)
1168 return ((cmd->config & ICE_NVM_CFG_ADAPTER_INFO_M) >>
1169 ICE_NVM_CFG_ADAPTER_INFO_S);
1173 * ice_validate_nvm_rw_reg - Check than an NVM access request is valid
1174 * @cmd: NVM access command structure
1176 * Validates that an NVM access structure is request to read or write a valid
1177 * register offset. First validates that the module and flags are correct, and
1178 * then ensures that the register offset is one of the accepted registers.
1180 static enum ice_status
1181 ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
1183 u32 module, flags, offset;
1186 module = ice_nvm_access_get_module(cmd);
1187 flags = ice_nvm_access_get_flags(cmd);
1188 offset = cmd->offset;
1190 /* Make sure the module and flags indicate a read/write request */
1191 if (module != ICE_NVM_REG_RW_MODULE ||
1192 flags != ICE_NVM_REG_RW_FLAGS ||
1193 cmd->data_size != FIELD_SIZEOF(union ice_nvm_access_data, regval))
1194 return ICE_ERR_PARAM;
1198 case GL_HICR_EN: /* Note, this register is read only */
1201 case GLGEN_CSR_DEBUG_C:
1203 case GLPCI_LBARCTRL:
1212 for (i = 0; i <= ICE_NVM_ACCESS_GL_HIDA_MAX; i++)
1213 if (offset == (u32)GL_HIDA(i))
1216 for (i = 0; i <= ICE_NVM_ACCESS_GL_HIBA_MAX; i++)
1217 if (offset == (u32)GL_HIBA(i))
1220 /* All other register offsets are not valid */
1221 return ICE_ERR_OUT_OF_RANGE;
1225 * ice_nvm_access_read - Handle an NVM read request
1226 * @hw: pointer to the HW struct
1227 * @cmd: NVM access command to process
1228 * @data: storage for the register value read
1230 * Process an NVM access request to read a register.
1233 ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
1234 union ice_nvm_access_data *data)
1236 enum ice_status status;
1238 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1240 /* Always initialize the output data, even on failure */
1241 ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM);
1243 /* Make sure this is a valid read/write access request */
1244 status = ice_validate_nvm_rw_reg(cmd);
1248 ice_debug(hw, ICE_DBG_NVM, "NVM access: reading register %08x\n",
1251 /* Read the register and store the contents in the data field */
1252 data->regval = rd32(hw, cmd->offset);
1258 * ice_nvm_access_write - Handle an NVM write request
1259 * @hw: pointer to the HW struct
1260 * @cmd: NVM access command to process
1261 * @data: NVM access data to write
1263 * Process an NVM access request to write a register.
1266 ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
1267 union ice_nvm_access_data *data)
1269 enum ice_status status;
1271 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1273 /* Make sure this is a valid read/write access request */
1274 status = ice_validate_nvm_rw_reg(cmd);
1278 /* Reject requests to write to read-only registers */
1279 switch (cmd->offset) {
1282 return ICE_ERR_OUT_OF_RANGE;
1287 ice_debug(hw, ICE_DBG_NVM, "NVM access: writing register %08x with value %08x\n",
1288 cmd->offset, data->regval);
1290 /* Write the data field to the specified register */
1291 wr32(hw, cmd->offset, data->regval);
1297 * ice_handle_nvm_access - Handle an NVM access request
1298 * @hw: pointer to the HW struct
1299 * @cmd: NVM access command info
1300 * @data: pointer to read or return data
1302 * Process an NVM access request. Read the command structure information and
1303 * determine if it is valid. If not, report an error indicating the command
1306 * For valid commands, perform the necessary function, copying the data into
1307 * the provided data buffer.
1310 ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
1311 union ice_nvm_access_data *data)
1313 u32 module, flags, adapter_info;
1315 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1317 /* Extended flags are currently reserved and must be zero */
1318 if ((cmd->config & ICE_NVM_CFG_EXT_FLAGS_M) != 0)
1319 return ICE_ERR_PARAM;
1321 /* Adapter info must match the HW device ID */
1322 adapter_info = ice_nvm_access_get_adapter(cmd);
1323 if (adapter_info != hw->device_id)
1324 return ICE_ERR_PARAM;
1326 switch (cmd->command) {
1327 case ICE_NVM_CMD_READ:
1328 module = ice_nvm_access_get_module(cmd);
1329 flags = ice_nvm_access_get_flags(cmd);
1331 /* Getting the driver's NVM features structure shares the same
1332 * command type as reading a register. Read the config field
1333 * to determine if this is a request to get features.
1335 if (module == ICE_NVM_GET_FEATURES_MODULE &&
1336 flags == ICE_NVM_GET_FEATURES_FLAGS &&
1338 return ice_nvm_access_get_features(cmd, data);
1340 return ice_nvm_access_read(hw, cmd, data);
1341 case ICE_NVM_CMD_WRITE:
1342 return ice_nvm_access_write(hw, cmd, data);
1344 return ICE_ERR_PARAM;