1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright (c) 2020, Intel Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Intel Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 #include "ice_common.h"
/* Fill a control queue info structure with the register offsets and masks
 * for either the AdminQ (prefix PF_FW) or the PF-VF Mailbox (prefix PF_MBX).
 * Wrapped in do/while(0) so it behaves as a single statement.
 */
#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)
58 * ice_adminq_init_regs - Initialize AdminQ registers
59 * @hw: pointer to the hardware structure
61 * This assumes the alloc_sq and alloc_rq functions have already been called
63 static void ice_adminq_init_regs(struct ice_hw *hw)
65 struct ice_ctl_q_info *cq = &hw->adminq;
67 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
69 ICE_CQ_INIT_REGS(cq, PF_FW);
73 * ice_mailbox_init_regs - Initialize Mailbox registers
74 * @hw: pointer to the hardware structure
76 * This assumes the alloc_sq and alloc_rq functions have already been called
78 static void ice_mailbox_init_regs(struct ice_hw *hw)
80 struct ice_ctl_q_info *cq = &hw->mailboxq;
82 ICE_CQ_INIT_REGS(cq, PF_MBX);
87 * @hw: pointer to the HW struct
88 * @cq: pointer to the specific Control queue
90 * Returns true if Queue is enabled else false.
92 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
94 /* check both queue-length and queue-enable fields */
95 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
96 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
97 cq->sq.len_ena_mask)) ==
98 (cq->num_sq_entries | cq->sq.len_ena_mask);
104 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
105 * @hw: pointer to the hardware structure
106 * @cq: pointer to the specific Control queue
108 static enum ice_status
109 ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
111 size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
113 cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
114 if (!cq->sq.desc_buf.va)
115 return ICE_ERR_NO_MEMORY;
117 cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
118 sizeof(struct ice_sq_cd));
119 if (!cq->sq.cmd_buf) {
120 ice_free_dma_mem(hw, &cq->sq.desc_buf);
121 return ICE_ERR_NO_MEMORY;
128 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
129 * @hw: pointer to the hardware structure
130 * @cq: pointer to the specific Control queue
132 static enum ice_status
133 ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
135 size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
137 cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
138 if (!cq->rq.desc_buf.va)
139 return ICE_ERR_NO_MEMORY;
144 * ice_free_cq_ring - Free control queue ring
145 * @hw: pointer to the hardware structure
146 * @ring: pointer to the specific control queue ring
148 * This assumes the posted buffers have already been cleaned
151 static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
153 ice_free_dma_mem(hw, &ring->desc_buf);
157 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
158 * @hw: pointer to the hardware structure
159 * @cq: pointer to the specific Control queue
161 static enum ice_status
162 ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
166 /* We'll be allocating the buffer info memory first, then we can
167 * allocate the mapped buffers for the event processing
169 cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
170 sizeof(cq->rq.desc_buf));
171 if (!cq->rq.dma_head)
172 return ICE_ERR_NO_MEMORY;
173 cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
175 /* allocate the mapped buffers */
176 for (i = 0; i < cq->num_rq_entries; i++) {
177 struct ice_aq_desc *desc;
178 struct ice_dma_mem *bi;
180 bi = &cq->rq.r.rq_bi[i];
181 bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
183 goto unwind_alloc_rq_bufs;
185 /* now configure the descriptors for use */
186 desc = ICE_CTL_Q_DESC(cq->rq, i);
188 desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
189 if (cq->rq_buf_size > ICE_AQ_LG_BUF)
190 desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
192 /* This is in accordance with Admin queue design, there is no
193 * register for buffer size configuration
195 desc->datalen = CPU_TO_LE16(bi->size);
197 desc->cookie_high = 0;
198 desc->cookie_low = 0;
199 desc->params.generic.addr_high =
200 CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
201 desc->params.generic.addr_low =
202 CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
203 desc->params.generic.param0 = 0;
204 desc->params.generic.param1 = 0;
208 unwind_alloc_rq_bufs:
209 /* don't try to free the one that failed... */
212 ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
213 cq->rq.r.rq_bi = NULL;
214 ice_free(hw, cq->rq.dma_head);
215 cq->rq.dma_head = NULL;
217 return ICE_ERR_NO_MEMORY;
221 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
222 * @hw: pointer to the hardware structure
223 * @cq: pointer to the specific Control queue
225 static enum ice_status
226 ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
230 /* No mapped memory needed yet, just the buffer info structures */
231 cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
232 sizeof(cq->sq.desc_buf));
233 if (!cq->sq.dma_head)
234 return ICE_ERR_NO_MEMORY;
235 cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
237 /* allocate the mapped buffers */
238 for (i = 0; i < cq->num_sq_entries; i++) {
239 struct ice_dma_mem *bi;
241 bi = &cq->sq.r.sq_bi[i];
242 bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
244 goto unwind_alloc_sq_bufs;
248 unwind_alloc_sq_bufs:
249 /* don't try to free the one that failed... */
252 ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
253 cq->sq.r.sq_bi = NULL;
254 ice_free(hw, cq->sq.dma_head);
255 cq->sq.dma_head = NULL;
257 return ICE_ERR_NO_MEMORY;
260 static enum ice_status
261 ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
263 /* Clear Head and Tail */
264 wr32(hw, ring->head, 0);
265 wr32(hw, ring->tail, 0);
267 /* set starting point */
268 wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
269 wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
270 wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));
272 /* Check one register to verify that config was applied */
273 if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
274 return ICE_ERR_AQ_ERROR;
280 * ice_cfg_sq_regs - configure Control ATQ registers
281 * @hw: pointer to the hardware structure
282 * @cq: pointer to the specific Control queue
284 * Configure base address and length registers for the transmit queue
286 static enum ice_status
287 ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
289 return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
293 * ice_cfg_rq_regs - configure Control ARQ register
294 * @hw: pointer to the hardware structure
295 * @cq: pointer to the specific Control queue
297 * Configure base address and length registers for the receive (event queue)
299 static enum ice_status
300 ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
302 enum ice_status status;
304 status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
308 /* Update tail in the HW to post pre-allocated buffers */
309 wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
/* Free the DMA buffers, command details list, and DMA head for one ring
 * (sq or rq) of a control queue. Token-pastes the ring name to pick the
 * matching buffer array and entry count. Wrapped in do/while(0).
 */
#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa)		\
				ice_free_dma_mem((hw),			\
					&(qi)->ring.r.ring##_bi[i]);	\
	}								\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		ice_free(hw, (qi)->ring.cmd_buf);			\
	/* free DMA head */						\
	ice_free(hw, (qi)->ring.dma_head);				\
} while (0)
333 * ice_init_sq - main initialization routine for Control ATQ
334 * @hw: pointer to the hardware structure
335 * @cq: pointer to the specific Control queue
337 * This is the main initialization routine for the Control Send Queue
338 * Prior to calling this function, the driver *MUST* set the following fields
339 * in the cq->structure:
340 * - cq->num_sq_entries
343 * Do *NOT* hold the lock when calling this as the memory allocation routines
344 * called are not going to be atomic context safe
346 static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
348 enum ice_status ret_code;
350 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
352 if (cq->sq.count > 0) {
353 /* queue already initialized */
354 ret_code = ICE_ERR_NOT_READY;
355 goto init_ctrlq_exit;
358 /* verify input for valid configuration */
359 if (!cq->num_sq_entries || !cq->sq_buf_size) {
360 ret_code = ICE_ERR_CFG;
361 goto init_ctrlq_exit;
364 cq->sq.next_to_use = 0;
365 cq->sq.next_to_clean = 0;
367 /* allocate the ring memory */
368 ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
370 goto init_ctrlq_exit;
372 /* allocate buffers in the rings */
373 ret_code = ice_alloc_sq_bufs(hw, cq);
375 goto init_ctrlq_free_rings;
377 /* initialize base registers */
378 ret_code = ice_cfg_sq_regs(hw, cq);
380 goto init_ctrlq_free_rings;
383 cq->sq.count = cq->num_sq_entries;
384 goto init_ctrlq_exit;
386 init_ctrlq_free_rings:
387 ICE_FREE_CQ_BUFS(hw, cq, sq);
388 ice_free_cq_ring(hw, &cq->sq);
395 * ice_init_rq - initialize ARQ
396 * @hw: pointer to the hardware structure
397 * @cq: pointer to the specific Control queue
399 * The main initialization routine for the Admin Receive (Event) Queue.
400 * Prior to calling this function, the driver *MUST* set the following fields
401 * in the cq->structure:
402 * - cq->num_rq_entries
405 * Do *NOT* hold the lock when calling this as the memory allocation routines
406 * called are not going to be atomic context safe
408 static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
410 enum ice_status ret_code;
412 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
414 if (cq->rq.count > 0) {
415 /* queue already initialized */
416 ret_code = ICE_ERR_NOT_READY;
417 goto init_ctrlq_exit;
420 /* verify input for valid configuration */
421 if (!cq->num_rq_entries || !cq->rq_buf_size) {
422 ret_code = ICE_ERR_CFG;
423 goto init_ctrlq_exit;
426 cq->rq.next_to_use = 0;
427 cq->rq.next_to_clean = 0;
429 /* allocate the ring memory */
430 ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
432 goto init_ctrlq_exit;
434 /* allocate buffers in the rings */
435 ret_code = ice_alloc_rq_bufs(hw, cq);
437 goto init_ctrlq_free_rings;
439 /* initialize base registers */
440 ret_code = ice_cfg_rq_regs(hw, cq);
442 goto init_ctrlq_free_rings;
445 cq->rq.count = cq->num_rq_entries;
446 goto init_ctrlq_exit;
448 init_ctrlq_free_rings:
449 ICE_FREE_CQ_BUFS(hw, cq, rq);
450 ice_free_cq_ring(hw, &cq->rq);
457 * ice_shutdown_sq - shutdown the Control ATQ
458 * @hw: pointer to the hardware structure
459 * @cq: pointer to the specific Control queue
461 * The main shutdown routine for the Control Transmit Queue
463 static enum ice_status
464 ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
466 enum ice_status ret_code = ICE_SUCCESS;
468 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
470 ice_acquire_lock(&cq->sq_lock);
473 ret_code = ICE_ERR_NOT_READY;
474 goto shutdown_sq_out;
477 /* Stop firmware AdminQ processing */
478 wr32(hw, cq->sq.head, 0);
479 wr32(hw, cq->sq.tail, 0);
480 wr32(hw, cq->sq.len, 0);
481 wr32(hw, cq->sq.bal, 0);
482 wr32(hw, cq->sq.bah, 0);
484 cq->sq.count = 0; /* to indicate uninitialized queue */
486 /* free ring buffers and the ring itself */
487 ICE_FREE_CQ_BUFS(hw, cq, sq);
488 ice_free_cq_ring(hw, &cq->sq);
491 ice_release_lock(&cq->sq_lock);
496 * ice_aq_ver_check - Check the reported AQ API version.
497 * @hw: pointer to the hardware structure
499 * Checks if the driver should load on a given AQ API version.
501 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
503 static bool ice_aq_ver_check(struct ice_hw *hw)
505 if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
506 /* Major API version is newer than expected, don't load */
507 ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
509 } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
510 if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
511 ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
512 else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
513 ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
515 /* Major API version is older than expected, log a warning */
516 ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
522 * ice_shutdown_rq - shutdown Control ARQ
523 * @hw: pointer to the hardware structure
524 * @cq: pointer to the specific Control queue
526 * The main shutdown routine for the Control Receive Queue
528 static enum ice_status
529 ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
531 enum ice_status ret_code = ICE_SUCCESS;
533 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
535 ice_acquire_lock(&cq->rq_lock);
538 ret_code = ICE_ERR_NOT_READY;
539 goto shutdown_rq_out;
542 /* Stop Control Queue processing */
543 wr32(hw, cq->rq.head, 0);
544 wr32(hw, cq->rq.tail, 0);
545 wr32(hw, cq->rq.len, 0);
546 wr32(hw, cq->rq.bal, 0);
547 wr32(hw, cq->rq.bah, 0);
549 /* set rq.count to 0 to indicate uninitialized queue */
552 /* free ring buffers and the ring itself */
553 ICE_FREE_CQ_BUFS(hw, cq, rq);
554 ice_free_cq_ring(hw, &cq->rq);
557 ice_release_lock(&cq->rq_lock);
562 * ice_idle_aq - stop ARQ/ATQ processing momentarily
563 * @hw: pointer to the hardware structure
564 * @cq: pointer to the specific Control queue
566 void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
568 wr32(hw, cq->sq.len, 0);
569 wr32(hw, cq->rq.len, 0);
571 ice_msec_delay(2, false);
575 * ice_init_check_adminq - Check version for Admin Queue to know if its alive
576 * @hw: pointer to the hardware structure
578 static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
580 struct ice_ctl_q_info *cq = &hw->adminq;
581 enum ice_status status;
583 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
585 status = ice_aq_get_fw_ver(hw, NULL);
587 goto init_ctrlq_free_rq;
589 if (!ice_aq_ver_check(hw)) {
590 status = ICE_ERR_FW_API_VER;
591 goto init_ctrlq_free_rq;
597 ice_shutdown_rq(hw, cq);
598 ice_shutdown_sq(hw, cq);
603 * ice_init_ctrlq - main initialization routine for any control Queue
604 * @hw: pointer to the hardware structure
605 * @q_type: specific Control queue type
607 * Prior to calling this function, the driver *MUST* set the following fields
608 * in the cq->structure:
609 * - cq->num_sq_entries
610 * - cq->num_rq_entries
614 * NOTE: this function does not initialize the controlq locks
616 static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
618 struct ice_ctl_q_info *cq;
619 enum ice_status ret_code;
621 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
624 case ICE_CTL_Q_ADMIN:
625 ice_adminq_init_regs(hw);
628 case ICE_CTL_Q_MAILBOX:
629 ice_mailbox_init_regs(hw);
633 return ICE_ERR_PARAM;
637 /* verify input for valid configuration */
638 if (!cq->num_rq_entries || !cq->num_sq_entries ||
639 !cq->rq_buf_size || !cq->sq_buf_size) {
643 /* setup SQ command write back timeout */
644 cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
646 /* allocate the ATQ */
647 ret_code = ice_init_sq(hw, cq);
651 /* allocate the ARQ */
652 ret_code = ice_init_rq(hw, cq);
654 goto init_ctrlq_free_sq;
660 ice_shutdown_sq(hw, cq);
665 * ice_shutdown_ctrlq - shutdown routine for any control queue
666 * @hw: pointer to the hardware structure
667 * @q_type: specific Control queue type
669 * NOTE: this function does not destroy the control queue locks.
671 static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
673 struct ice_ctl_q_info *cq;
675 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
678 case ICE_CTL_Q_ADMIN:
680 if (ice_check_sq_alive(hw, cq))
681 ice_aq_q_shutdown(hw, true);
683 case ICE_CTL_Q_MAILBOX:
690 ice_shutdown_sq(hw, cq);
691 ice_shutdown_rq(hw, cq);
695 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
696 * @hw: pointer to the hardware structure
698 * NOTE: this function does not destroy the control queue locks. The driver
699 * may call this at runtime to shutdown and later restart control queues, such
700 * as in response to a reset event.
702 void ice_shutdown_all_ctrlq(struct ice_hw *hw)
704 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
705 /* Shutdown FW admin queue */
706 ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
707 /* Shutdown PF-VF Mailbox */
708 ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
712 * ice_init_all_ctrlq - main initialization routine for all control queues
713 * @hw: pointer to the hardware structure
715 * Prior to calling this function, the driver MUST* set the following fields
716 * in the cq->structure for all control queues:
717 * - cq->num_sq_entries
718 * - cq->num_rq_entries
722 * NOTE: this function does not initialize the controlq locks.
724 enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
726 enum ice_status status;
729 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
731 /* Init FW admin queue */
733 status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
737 status = ice_init_check_adminq(hw);
738 if (status != ICE_ERR_AQ_FW_CRITICAL)
741 ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
742 ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
743 ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
744 } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
748 /* Init Mailbox queue */
749 return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
753 * ice_init_ctrlq_locks - Initialize locks for a control queue
754 * @cq: pointer to the control queue
756 * Initializes the send and receive queue locks for a given control queue.
758 static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
760 ice_init_lock(&cq->sq_lock);
761 ice_init_lock(&cq->rq_lock);
765 * ice_create_all_ctrlq - main initialization routine for all control queues
766 * @hw: pointer to the hardware structure
768 * Prior to calling this function, the driver *MUST* set the following fields
769 * in the cq->structure for all control queues:
770 * - cq->num_sq_entries
771 * - cq->num_rq_entries
775 * This function creates all the control queue locks and then calls
776 * ice_init_all_ctrlq. It should be called once during driver load. If the
777 * driver needs to re-initialize control queues at run time it should call
778 * ice_init_all_ctrlq instead.
780 enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
782 ice_init_ctrlq_locks(&hw->adminq);
783 ice_init_ctrlq_locks(&hw->mailboxq);
785 return ice_init_all_ctrlq(hw);
789 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
790 * @cq: pointer to the control queue
792 * Destroys the send and receive queue locks for a given control queue.
794 static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
796 ice_destroy_lock(&cq->sq_lock);
797 ice_destroy_lock(&cq->rq_lock);
801 * ice_destroy_all_ctrlq - exit routine for all control queues
802 * @hw: pointer to the hardware structure
804 * This function shuts down all the control queues and then destroys the
805 * control queue locks. It should be called once during driver unload. The
806 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
807 * reinitialize control queues, such as in response to a reset event.
809 void ice_destroy_all_ctrlq(struct ice_hw *hw)
811 /* shut down all the control queues first */
812 ice_shutdown_all_ctrlq(hw);
814 ice_destroy_ctrlq_locks(&hw->adminq);
815 ice_destroy_ctrlq_locks(&hw->mailboxq);
819 * ice_clean_sq - cleans Admin send queue (ATQ)
820 * @hw: pointer to the hardware structure
821 * @cq: pointer to the specific Control queue
823 * returns the number of free desc
825 static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
827 struct ice_ctl_q_ring *sq = &cq->sq;
828 u16 ntc = sq->next_to_clean;
829 struct ice_sq_cd *details;
830 struct ice_aq_desc *desc;
832 desc = ICE_CTL_Q_DESC(*sq, ntc);
833 details = ICE_CTL_Q_DETAILS(*sq, ntc);
835 while (rd32(hw, cq->sq.head) != ntc) {
836 ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
837 ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
838 ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
840 if (ntc == sq->count)
842 desc = ICE_CTL_Q_DESC(*sq, ntc);
843 details = ICE_CTL_Q_DETAILS(*sq, ntc);
846 sq->next_to_clean = ntc;
848 return ICE_CTL_Q_DESC_UNUSED(sq);
853 * @hw: pointer to the hardware structure
854 * @desc: pointer to control queue descriptor
855 * @buf: pointer to command buffer
856 * @buf_len: max length of buf
858 * Dumps debug log about control command with descriptor contents.
860 static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
862 struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
865 if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
871 datalen = LE16_TO_CPU(cq_desc->datalen);
872 flags = LE16_TO_CPU(cq_desc->flags);
874 ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
875 LE16_TO_CPU(cq_desc->opcode), flags, datalen,
876 LE16_TO_CPU(cq_desc->retval));
877 ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
878 LE32_TO_CPU(cq_desc->cookie_high),
879 LE32_TO_CPU(cq_desc->cookie_low));
880 ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
881 LE32_TO_CPU(cq_desc->params.generic.param0),
882 LE32_TO_CPU(cq_desc->params.generic.param1));
883 ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
884 LE32_TO_CPU(cq_desc->params.generic.addr_high),
885 LE32_TO_CPU(cq_desc->params.generic.addr_low));
886 /* Dump buffer iff 1) one exists and 2) is either a response indicated
887 * by the DD and/or CMP flag set or a command with the RD flag set.
889 if (buf && cq_desc->datalen != 0 &&
890 (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
891 flags & ICE_AQ_FLAG_RD)) {
892 ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
893 ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
894 MIN_T(u16, buf_len, datalen));
899 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
900 * @hw: pointer to the HW struct
901 * @cq: pointer to the specific Control queue
903 * Returns true if the firmware has processed all descriptors on the
904 * admin send queue. Returns false if there are still requests pending.
906 bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
908 /* AQ designers suggest use of head for better
909 * timing reliability than DD bit
911 return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
915 * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
916 * @hw: pointer to the HW struct
917 * @cq: pointer to the specific Control queue
918 * @desc: prefilled descriptor describing the command (non DMA mem)
919 * @buf: buffer to use for indirect commands (or NULL for direct commands)
920 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
921 * @cd: pointer to command details structure
923 * This is the main send command routine for the ATQ. It runs the queue,
924 * cleans the queue, etc.
926 static enum ice_status
927 ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
928 struct ice_aq_desc *desc, void *buf, u16 buf_size,
929 struct ice_sq_cd *cd)
931 struct ice_dma_mem *dma_buf = NULL;
932 struct ice_aq_desc *desc_on_ring;
933 bool cmd_completed = false;
934 enum ice_status status = ICE_SUCCESS;
935 struct ice_sq_cd *details;
940 /* if reset is in progress return a soft error */
941 if (hw->reset_ongoing)
942 return ICE_ERR_RESET_ONGOING;
944 cq->sq_last_status = ICE_AQ_RC_OK;
947 ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
948 status = ICE_ERR_AQ_EMPTY;
949 goto sq_send_command_error;
952 if ((buf && !buf_size) || (!buf && buf_size)) {
953 status = ICE_ERR_PARAM;
954 goto sq_send_command_error;
958 if (buf_size > cq->sq_buf_size) {
959 ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
961 status = ICE_ERR_INVAL_SIZE;
962 goto sq_send_command_error;
965 desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
966 if (buf_size > ICE_AQ_LG_BUF)
967 desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
970 val = rd32(hw, cq->sq.head);
971 if (val >= cq->num_sq_entries) {
972 ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
974 status = ICE_ERR_AQ_EMPTY;
975 goto sq_send_command_error;
978 details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
982 ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
984 /* Call clean and check queue available function to reclaim the
985 * descriptors that were processed by FW/MBX; the function returns the
986 * number of desc available. The clean function called here could be
987 * called in a separate thread in case of asynchronous completions.
989 if (ice_clean_sq(hw, cq) == 0) {
990 ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
991 status = ICE_ERR_AQ_FULL;
992 goto sq_send_command_error;
995 /* initialize the temp desc pointer with the right desc */
996 desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
998 /* if the desc is available copy the temp desc to the right place */
999 ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
1002 /* if buf is not NULL assume indirect command */
1004 dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
1005 /* copy the user buf into the respective DMA buf */
1006 ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
1007 desc_on_ring->datalen = CPU_TO_LE16(buf_size);
1009 /* Update the address values in the desc with the pa value
1010 * for respective buffer
1012 desc_on_ring->params.generic.addr_high =
1013 CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
1014 desc_on_ring->params.generic.addr_low =
1015 CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
1018 /* Debug desc and buffer */
1019 ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
1021 ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
1023 (cq->sq.next_to_use)++;
1024 if (cq->sq.next_to_use == cq->sq.count)
1025 cq->sq.next_to_use = 0;
1026 wr32(hw, cq->sq.tail, cq->sq.next_to_use);
1029 if (ice_sq_done(hw, cq))
1032 ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
1034 } while (total_delay < cq->sq_cmd_timeout);
1036 /* if ready, copy the desc back to temp */
1037 if (ice_sq_done(hw, cq)) {
1038 ice_memcpy(desc, desc_on_ring, sizeof(*desc),
1041 /* get returned length to copy */
1042 u16 copy_size = LE16_TO_CPU(desc->datalen);
1044 if (copy_size > buf_size) {
1045 ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
1046 copy_size, buf_size);
1047 status = ICE_ERR_AQ_ERROR;
1049 ice_memcpy(buf, dma_buf->va, copy_size,
1053 retval = LE16_TO_CPU(desc->retval);
1055 ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
1056 LE16_TO_CPU(desc->opcode),
1059 /* strip off FW internal code */
1062 cmd_completed = true;
1063 if (!status && retval != ICE_AQ_RC_OK)
1064 status = ICE_ERR_AQ_ERROR;
1065 cq->sq_last_status = (enum ice_aq_err)retval;
1068 ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
1070 ice_debug_cq(hw, (void *)desc, buf, buf_size);
1072 /* save writeback AQ if requested */
1073 if (details->wb_desc)
1074 ice_memcpy(details->wb_desc, desc_on_ring,
1075 sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);
1077 /* update the error if time out occurred */
1078 if (!cmd_completed) {
1079 if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
1080 rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
1081 ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
1082 status = ICE_ERR_AQ_FW_CRITICAL;
1084 ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
1085 status = ICE_ERR_AQ_TIMEOUT;
1089 sq_send_command_error:
1094 * ice_sq_send_cmd - send command to Control Queue (ATQ)
1095 * @hw: pointer to the HW struct
1096 * @cq: pointer to the specific Control queue
1097 * @desc: prefilled descriptor describing the command (non DMA mem)
1098 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1099 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1100 * @cd: pointer to command details structure
1102 * This is the main send command routine for the ATQ. It runs the queue,
1103 * cleans the queue, etc.
1106 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1107 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1108 struct ice_sq_cd *cd)
1110 enum ice_status status = ICE_SUCCESS;
1112 /* if reset is in progress return a soft error */
1113 if (hw->reset_ongoing)
1114 return ICE_ERR_RESET_ONGOING;
1116 ice_acquire_lock(&cq->sq_lock);
1117 status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
1118 ice_release_lock(&cq->sq_lock);
1124 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
1125 * @desc: pointer to the temp descriptor (non DMA mem)
1126 * @opcode: the opcode can be used to decide which flags to turn off or on
1128 * Fill the desc with default values
1130 void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
1132 /* zero out the desc */
1133 ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
1134 desc->opcode = CPU_TO_LE16(opcode);
1135 desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
1140 * @hw: pointer to the HW struct
1141 * @cq: pointer to the specific Control queue
1142 * @e: event info from the receive descriptor, includes any buffers
1143 * @pending: number of events that could be left to process
1145 * This function cleans one Admin Receive Queue element and returns
1146 * the contents through e. It can also return how many events are
1147 * left to process through 'pending'.
1150 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1151 struct ice_rq_event_info *e, u16 *pending)
1153 u16 ntc = cq->rq.next_to_clean;
1154 enum ice_status ret_code = ICE_SUCCESS;
1155 struct ice_aq_desc *desc;
1156 struct ice_dma_mem *bi;
1162 /* pre-clean the event info */
1163 ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);
1165 /* take the lock before we start messing with the ring */
1166 ice_acquire_lock(&cq->rq_lock);
1168 if (!cq->rq.count) {
1169 ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
1170 ret_code = ICE_ERR_AQ_EMPTY;
1171 goto clean_rq_elem_err;
1174 /* set next_to_use to head */
1175 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1178 /* nothing to do - shouldn't need to update ring's values */
1179 ret_code = ICE_ERR_AQ_NO_WORK;
1180 goto clean_rq_elem_out;
1183 /* now clean the next descriptor */
1184 desc = ICE_CTL_Q_DESC(cq->rq, ntc);
1187 cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
1188 flags = LE16_TO_CPU(desc->flags);
1189 if (flags & ICE_AQ_FLAG_ERR) {
1190 ret_code = ICE_ERR_AQ_ERROR;
1191 ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
1192 LE16_TO_CPU(desc->opcode),
1193 cq->rq_last_status);
1195 ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
1196 datalen = LE16_TO_CPU(desc->datalen);
1197 e->msg_len = MIN_T(u16, datalen, e->buf_len);
1198 if (e->msg_buf && e->msg_len)
1199 ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
1200 e->msg_len, ICE_DMA_TO_NONDMA);
1202 ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
1204 ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
1206 /* Restore the original datalen and buffer address in the desc,
1207 * FW updates datalen to indicate the event message size
1209 bi = &cq->rq.r.rq_bi[ntc];
1210 ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
1212 desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
1213 if (cq->rq_buf_size > ICE_AQ_LG_BUF)
1214 desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
1215 desc->datalen = CPU_TO_LE16(bi->size);
1216 desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
1217 desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
1219 /* set tail = the last cleaned desc index. */
1220 wr32(hw, cq->rq.tail, ntc);
1221 /* ntc is updated to tail + 1 */
1223 if (ntc == cq->num_rq_entries)
1225 cq->rq.next_to_clean = ntc;
1226 cq->rq.next_to_use = ntu;
1229 /* Set pending if needed, unlock and return */
1231 /* re-read HW head to calculate actual pending messages */
1232 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1233 *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
1236 ice_release_lock(&cq->rq_lock);