/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix) \
do { \
	(qinfo)->sq.head = prefix##_ATQH; \
	(qinfo)->sq.tail = prefix##_ATQT; \
	(qinfo)->sq.len = prefix##_ATQLEN; \
	(qinfo)->sq.bah = prefix##_ATQBAH; \
	(qinfo)->sq.bal = prefix##_ATQBAL; \
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
	(qinfo)->rq.head = prefix##_ARQH; \
	(qinfo)->rq.tail = prefix##_ARQT; \
	(qinfo)->rq.len = prefix##_ARQLEN; \
	(qinfo)->rq.bah = prefix##_ARQBAH; \
	(qinfo)->rq.bal = prefix##_ARQBAL; \
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
} while (0)
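
/* Both control queues (AdminQ and PF-VF Mailbox) use the same register
 * layout; only the register prefix differs (PF_FW_* vs. PF_MBX_*), so one
 * macro fills in the per-queue register offsets and masks for either queue.
 */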

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_check_sq_alive - check if the Send Queue (ATQ) is alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the queue is enabled, false otherwise.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;

	cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
				    sizeof(struct ice_sq_cd));
	if (!cq->sq.cmd_buf) {
		ice_free_dma_mem(hw, &cq->sq.desc_buf);
		return ICE_ERR_NO_MEMORY;
	}

	return ICE_SUCCESS;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;

	return ICE_SUCCESS;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	ice_free_dma_mem(hw, &ring->desc_buf);
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
				     sizeof(cq->rq.desc_buf));
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
		desc->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return ICE_SUCCESS;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
	ice_free(hw, cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
				     sizeof(cq->sq.desc_buf));
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
	}
	return ICE_SUCCESS;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
	ice_free(hw, cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}
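
/**
 * ice_cfg_cq_regs - program control queue ring registers
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Clears head and tail, programs the base address and length registers for
 * the given ring, and reads one register back to verify the write landed.
 */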
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return ICE_SUCCESS;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return ICE_SUCCESS;
}

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
do { \
	int i; \
	/* free descriptors */ \
	for (i = 0; i < (qi)->num_##ring##_entries; i++) \
		if ((qi)->ring.r.ring##_bi[i].pa) \
			ice_free_dma_mem((hw), \
					 &(qi)->ring.r.ring##_bi[i]); \
	/* free the buffer info list */ \
	if ((qi)->ring.cmd_buf) \
		ice_free(hw, (qi)->ring.cmd_buf); \
	/* free DMA head */ \
	ice_free(hw, (qi)->ring.dma_head); \
} while (0)
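
/* ICE_FREE_CQ_BUFS is used by the shutdown routines below; it releases every
 * posted DMA buffer, the command details array (allocated for the sq only),
 * and the dma_head tracking array for the given ring.
 */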

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}

	return true;
}
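
/* A firmware major API version newer than EXP_FW_API_VER_MAJOR prevents the
 * driver from loading; an older major version, or a minor version more than
 * two away from EXP_FW_API_VER_MINOR, only results in an informational
 * message and the load proceeds.
 */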

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_idle_aq - stop ARQ/ATQ processing momentarily
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->rq.len, 0);

	ice_msec_delay(2, false);
}
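
/* Clearing the length registers disables both queues, so this is only meant
 * to quiesce control queue processing briefly (for example around a reset);
 * the short delay gives the hardware time to finish any in-flight access.
 */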

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return ICE_SUCCESS;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return ICE_SUCCESS;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue */
	status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (status)
		return status;

	status = ice_init_check_adminq(hw);
	if (status)
		return status;

	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
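
/* Typical driver lifecycle (illustrative): fill in num_sq_entries,
 * num_rq_entries, rq_buf_size and sq_buf_size for hw->adminq and
 * hw->mailboxq, call ice_create_all_ctrlq() once at probe time and
 * ice_destroy_all_ctrlq() once at unload. Around a reset, use
 * ice_shutdown_all_ctrlq() followed by ice_init_all_ctrlq() so that the
 * locks survive the re-initialization.
 */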

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void
ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans the Admin Send Queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}
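
/* Cleaning the send queue only requires following the hardware head pointer:
 * every descriptor between next_to_clean and head has already been consumed
 * by firmware, so those slots are zeroed and handed back to the ring.
 */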

/**
 * ice_debug_cq - dump a control queue descriptor for debugging
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 datalen, flags;

	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	datalen = LE16_TO_CPU(cq_desc->datalen);
	flags = LE16_TO_CPU(cq_desc->flags);

	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
		  LE16_TO_CPU(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->cookie_high),
		  LE32_TO_CPU(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.param0),
		  LE32_TO_CPU(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
	/* Dump buffer iff 1) one exists and 2) is either a response indicated
	 * by the DD and/or CMP flag set or a command with the RD flag set.
	 */
	if (buf && cq_desc->datalen != 0 &&
	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
	     flags & ICE_AQ_FLAG_RD)) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
				MIN_T(u16, buf_len, datalen));
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
static enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		ice_memcpy(details->wb_desc, desc_on_ring,
			   sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	return status;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_SUCCESS;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	ice_acquire_lock(&cq->sq_lock);
	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
	ice_release_lock(&cq->sq_lock);

	return status;
}
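
/* ice_sq_send_cmd_nolock() above does the actual descriptor handling; this
 * wrapper only re-checks for an in-progress reset and serializes callers on
 * sq_lock around the send.
 */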

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}
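
/* Illustrative usage: commands are normally built by filling a descriptor
 * with an opcode and then handing it to ice_sq_send_cmd(), e.g.
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 */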

/**
 * ice_clean_rq_elem - clean one Control Receive Queue (ARQ) element
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = ICE_SUCCESS;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode),
			  cq->rq_last_status);
	}
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = MIN_T(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}