1 /******************************************************************************
3 Copyright (c) 2013-2014, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #include "i40e_status.h"
36 #include "i40e_type.h"
37 #include "i40e_register.h"
38 #include "i40e_adminq.h"
39 #include "i40e_prototype.h"
42 * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
43 * @desc: API request descriptor
/* Returns TRUE when @desc carries an NVM-modifying opcode (nvm_erase or
 * nvm_update).  Such commands are serialized elsewhere in this file via
 * hw->aq.nvm_busy.  Opcodes are compared in little-endian wire format. */
45 static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
47 return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
48 desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
52 * i40e_adminq_init_regs - Initialize AdminQ registers
53 * @hw: pointer to the hardware structure
55 * This assumes the alloc_asq and alloc_arq functions have already been called
/* Records the register *offsets* (not values) for the send (ASQ) and
 * receive (ARQ) queues in the local hw->aq struct so later wr32/rd32
 * calls can be function-agnostic. */
57 static void i40e_adminq_init_regs(struct i40e_hw *hw)
59 /* set head and tail registers in our local struct */
/* VF register set.  NOTE(review): the branch condition selecting VF vs
 * PF offsets (presumably an i40e_is_vf() check) is not visible in this
 * fragment — confirm against the full source. */
61 hw->aq.asq.tail = I40E_VF_ATQT1;
62 hw->aq.asq.head = I40E_VF_ATQH1;
63 hw->aq.asq.len = I40E_VF_ATQLEN1;
64 hw->aq.asq.bal = I40E_VF_ATQBAL1;
65 hw->aq.asq.bah = I40E_VF_ATQBAH1;
66 hw->aq.arq.tail = I40E_VF_ARQT1;
67 hw->aq.arq.head = I40E_VF_ARQH1;
68 hw->aq.arq.len = I40E_VF_ARQLEN1;
69 hw->aq.arq.bal = I40E_VF_ARQBAL1;
70 hw->aq.arq.bah = I40E_VF_ARQBAH1;
/* PF register set (the alternate branch). */
72 hw->aq.asq.tail = I40E_PF_ATQT;
73 hw->aq.asq.head = I40E_PF_ATQH;
74 hw->aq.asq.len = I40E_PF_ATQLEN;
75 hw->aq.asq.bal = I40E_PF_ATQBAL;
76 hw->aq.asq.bah = I40E_PF_ATQBAH;
77 hw->aq.arq.tail = I40E_PF_ARQT;
78 hw->aq.arq.head = I40E_PF_ARQH;
79 hw->aq.arq.len = I40E_PF_ARQLEN;
80 hw->aq.arq.bal = I40E_PF_ARQBAL;
81 hw->aq.arq.bah = I40E_PF_ARQBAH;
86 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
87 * @hw: pointer to the hardware structure
/* Allocates the ASQ descriptor ring (DMA memory, aligned) and the
 * parallel array of per-command details (plain virtual memory).
 * On failure of the second allocation the descriptor ring is freed so
 * the caller sees an all-or-nothing result. */
89 enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
91 enum i40e_status_code ret_code;
93 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
95 (hw->aq.num_asq_entries *
96 sizeof(struct i40e_aq_desc)),
97 I40E_ADMINQ_DESC_ALIGNMENT);
101 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
102 (hw->aq.num_asq_entries *
103 sizeof(struct i40e_asq_cmd_details)));
/* unwind: details allocation failed, release the descriptor ring */
105 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
113 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
114 * @hw: pointer to the hardware structure
/* Allocates only the ARQ descriptor ring; the receive data buffers are
 * allocated separately in i40e_alloc_arq_bufs(). */
116 enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
118 enum i40e_status_code ret_code;
120 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
122 (hw->aq.num_arq_entries *
123 sizeof(struct i40e_aq_desc)),
124 I40E_ADMINQ_DESC_ALIGNMENT);
130 * i40e_free_adminq_asq - Free Admin Queue send rings
131 * @hw: pointer to the hardware structure
133 * This assumes the posted send buffers have already been cleaned
/* Releases only the descriptor ring DMA memory; command details and
 * data buffers are freed elsewhere (see i40e_free_asq_bufs). */
136 void i40e_free_adminq_asq(struct i40e_hw *hw)
138 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
142 * i40e_free_adminq_arq - Free Admin Queue receive rings
143 * @hw: pointer to the hardware structure
145 * This assumes the posted receive buffers have already been cleaned
/* Mirror of i40e_free_adminq_asq for the receive ring. */
148 void i40e_free_adminq_arq(struct i40e_hw *hw)
150 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
154 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
155 * @hw: pointer to the hardware structure
/* For each ARQ slot: allocate a DMA data buffer, then pre-fill the
 * corresponding ring descriptor with the BUF flag, buffer length and
 * the buffer's physical address so firmware can post events into it.
 * On any allocation failure, frees the buffers allocated so far plus
 * the dma_head tracking array. */
157 static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
159 enum i40e_status_code ret_code;
160 struct i40e_aq_desc *desc;
161 struct i40e_dma_mem *bi;
164 /* We'll be allocating the buffer info memory first, then we can
165 * allocate the mapped buffers for the event processing
168 /* buffer_info structures do not need alignment */
169 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
170 (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
173 hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
175 /* allocate the mapped buffers */
176 for (i = 0; i < hw->aq.num_arq_entries; i++) {
177 bi = &hw->aq.arq.r.arq_bi[i];
178 ret_code = i40e_allocate_dma_mem(hw, bi,
181 I40E_ADMINQ_DESC_ALIGNMENT);
183 goto unwind_alloc_arq_bufs;
185 /* now configure the descriptors for use */
186 desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
188 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
/* large-buffer flag needed when buffers exceed the 512-byte default */
189 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
190 desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
192 /* This is in accordance with Admin queue design, there is no
193 * register for buffer size configuration
195 desc->datalen = CPU_TO_LE16((u16)bi->size);
197 desc->cookie_high = 0;
198 desc->cookie_low = 0;
/* split 64-bit physical address across the two 32-bit desc fields */
199 desc->params.external.addr_high =
200 CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
201 desc->params.external.addr_low =
202 CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
203 desc->params.external.param0 = 0;
204 desc->params.external.param1 = 0;
210 unwind_alloc_arq_bufs:
211 /* don't try to free the one that failed... */
214 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
215 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
221 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
222 * @hw: pointer to the hardware structure
/* Like i40e_alloc_arq_bufs but for the send side: allocates one DMA
 * buffer per ASQ slot.  Descriptors are NOT pre-filled here; the send
 * path fills them per command in i40e_asq_send_command(). */
224 static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
226 enum i40e_status_code ret_code;
227 struct i40e_dma_mem *bi;
230 /* No mapped memory needed yet, just the buffer info structures */
231 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
232 (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
235 hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
237 /* allocate the mapped buffers */
238 for (i = 0; i < hw->aq.num_asq_entries; i++) {
239 bi = &hw->aq.asq.r.asq_bi[i];
240 ret_code = i40e_allocate_dma_mem(hw, bi,
243 I40E_ADMINQ_DESC_ALIGNMENT);
245 goto unwind_alloc_asq_bufs;
250 unwind_alloc_asq_bufs:
251 /* don't try to free the one that failed... */
254 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
255 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
261 * i40e_free_arq_bufs - Free receive queue buffer info elements
262 * @hw: pointer to the hardware structure
/* Frees all per-slot DMA buffers, the descriptor ring, and the
 * dma_head tracking array (which also backs arq_bi). */
264 static void i40e_free_arq_bufs(struct i40e_hw *hw)
268 /* free descriptors */
269 for (i = 0; i < hw->aq.num_arq_entries; i++)
270 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
272 /* free the descriptor memory */
273 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
275 /* free the dma header */
276 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
280 * i40e_free_asq_bufs - Free send queue buffer info elements
281 * @hw: pointer to the hardware structure
/* ASQ teardown counterpart of i40e_free_arq_bufs.  Unlike the ARQ
 * path it skips slots with a NULL physical address (send buffers may
 * be legitimately unused) and also frees the cmd_buf details array. */
283 static void i40e_free_asq_bufs(struct i40e_hw *hw)
287 /* only unmap if the address is non-NULL */
288 for (i = 0; i < hw->aq.num_asq_entries; i++)
289 if (hw->aq.asq.r.asq_bi[i].pa)
290 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
292 /* free the buffer info list */
293 i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
295 /* free the descriptor memory */
296 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
298 /* free the dma header */
299 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
303 * i40e_config_asq_regs - configure ASQ registers
304 * @hw: pointer to the hardware structure
306 * Configure base address and length registers for the transmit queue
/* Programs head/tail to 0, writes ring length with the enable bit,
 * and sets the 64-bit ring base address.  A read-back of BAL is the
 * cheap sanity check that the device accepted the configuration. */
308 static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
310 enum i40e_status_code ret_code = I40E_SUCCESS;
313 /* Clear Head and Tail */
314 wr32(hw, hw->aq.asq.head, 0);
315 wr32(hw, hw->aq.asq.tail, 0);
317 /* set starting point */
/* PF mask is used for both PF and VF; see the compile-time assert in
 * i40e_resume_aq() that the two masks are identical */
318 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
319 I40E_PF_ATQLEN_ATQENABLE_MASK));
320 wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
321 wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
323 /* Check one register to verify that config was applied */
324 reg = rd32(hw, hw->aq.asq.bal);
325 if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
326 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
332 * i40e_config_arq_regs - ARQ register configuration
333 * @hw: pointer to the hardware structure
335 * Configure base address and length registers for the receive (event queue)
/* Same pattern as i40e_config_asq_regs, plus a tail bump to
 * (count - 1) which hands all pre-allocated receive buffers to FW. */
337 static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
339 enum i40e_status_code ret_code = I40E_SUCCESS;
342 /* Clear Head and Tail */
343 wr32(hw, hw->aq.arq.head, 0);
344 wr32(hw, hw->aq.arq.tail, 0);
346 /* set starting point */
347 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
348 I40E_PF_ARQLEN_ARQENABLE_MASK));
349 wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
350 wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
352 /* Update tail in the HW to post pre-allocated buffers */
353 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
355 /* Check one register to verify that config was applied */
356 reg = rd32(hw, hw->aq.arq.bal);
357 if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
358 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
364 * i40e_init_asq - main initialization routine for ASQ
365 * @hw: pointer to the hardware structure
367 * This is the main initialization routine for the Admin Send Queue
368 * Prior to calling this function, drivers *MUST* set the following fields
369 * in the hw->aq structure:
370 * - hw->aq.num_asq_entries
/* fixed: the code below validates asq_buf_size, not arq_buf_size */
371 * - hw->aq.asq_buf_size
373 * Do *NOT* hold the lock when calling this as the memory allocation routines
374 * called are not going to be atomic context safe
376 enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
378 enum i40e_status_code ret_code = I40E_SUCCESS;
/* non-zero count is the "already initialized" sentinel (see shutdown) */
380 if (hw->aq.asq.count > 0) {
381 /* queue already initialized */
382 ret_code = I40E_ERR_NOT_READY;
383 goto init_adminq_exit;
386 /* verify input for valid configuration */
387 if ((hw->aq.num_asq_entries == 0) ||
388 (hw->aq.asq_buf_size == 0)) {
389 ret_code = I40E_ERR_CONFIG;
390 goto init_adminq_exit;
393 hw->aq.asq.next_to_use = 0;
394 hw->aq.asq.next_to_clean = 0;
395 hw->aq.asq.count = hw->aq.num_asq_entries;
397 /* allocate the ring memory */
398 ret_code = i40e_alloc_adminq_asq_ring(hw);
399 if (ret_code != I40E_SUCCESS)
400 goto init_adminq_exit;
402 /* allocate buffers in the rings */
403 ret_code = i40e_alloc_asq_bufs(hw);
404 if (ret_code != I40E_SUCCESS)
405 goto init_adminq_free_rings;
407 /* initialize base registers */
408 ret_code = i40e_config_asq_regs(hw);
409 if (ret_code != I40E_SUCCESS)
410 goto init_adminq_free_rings;
413 goto init_adminq_exit;
415 init_adminq_free_rings:
416 i40e_free_adminq_asq(hw);
423 * i40e_init_arq - initialize ARQ
424 * @hw: pointer to the hardware structure
426 * The main initialization routine for the Admin Receive (Event) Queue.
427 * Prior to calling this function, drivers *MUST* set the following fields
428 * in the hw->aq structure:
/* fixed: the code below validates num_arq_entries, not num_asq_entries */
429 * - hw->aq.num_arq_entries
430 * - hw->aq.arq_buf_size
432 * Do *NOT* hold the lock when calling this as the memory allocation routines
433 * called are not going to be atomic context safe
435 enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
437 enum i40e_status_code ret_code = I40E_SUCCESS;
439 if (hw->aq.arq.count > 0) {
440 /* queue already initialized */
441 ret_code = I40E_ERR_NOT_READY;
442 goto init_adminq_exit;
445 /* verify input for valid configuration */
446 if ((hw->aq.num_arq_entries == 0) ||
447 (hw->aq.arq_buf_size == 0)) {
448 ret_code = I40E_ERR_CONFIG;
449 goto init_adminq_exit;
452 hw->aq.arq.next_to_use = 0;
453 hw->aq.arq.next_to_clean = 0;
454 hw->aq.arq.count = hw->aq.num_arq_entries;
456 /* allocate the ring memory */
457 ret_code = i40e_alloc_adminq_arq_ring(hw);
458 if (ret_code != I40E_SUCCESS)
459 goto init_adminq_exit;
461 /* allocate buffers in the rings */
462 ret_code = i40e_alloc_arq_bufs(hw);
463 if (ret_code != I40E_SUCCESS)
464 goto init_adminq_free_rings;
466 /* initialize base registers */
467 ret_code = i40e_config_arq_regs(hw);
468 if (ret_code != I40E_SUCCESS)
469 goto init_adminq_free_rings;
472 goto init_adminq_exit;
474 init_adminq_free_rings:
475 i40e_free_adminq_arq(hw);
482 * i40e_shutdown_asq - shutdown the ASQ
483 * @hw: pointer to the hardware structure
485 * The main shutdown routine for the Admin Send Queue
/* Disables the queue in hardware (len=0 clears the enable bit), then
 * frees all buffers under the spinlock.  NOTE(review): registers are
 * zeroed *before* the lock is taken — a concurrent sender could race;
 * confirm callers guarantee quiescence. */
487 enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
489 enum i40e_status_code ret_code = I40E_SUCCESS;
/* count == 0 means never initialized (or already shut down) */
491 if (hw->aq.asq.count == 0)
492 return I40E_ERR_NOT_READY;
494 /* Stop firmware AdminQ processing */
495 wr32(hw, hw->aq.asq.head, 0);
496 wr32(hw, hw->aq.asq.tail, 0);
497 wr32(hw, hw->aq.asq.len, 0);
498 wr32(hw, hw->aq.asq.bal, 0);
499 wr32(hw, hw->aq.asq.bah, 0);
501 /* make sure spinlock is available */
502 i40e_acquire_spinlock(&hw->aq.asq_spinlock);
504 hw->aq.asq.count = 0; /* to indicate uninitialized queue */
506 /* free ring buffers */
507 i40e_free_asq_bufs(hw);
509 i40e_release_spinlock(&hw->aq.asq_spinlock);
515 * i40e_shutdown_arq - shutdown ARQ
516 * @hw: pointer to the hardware structure
518 * The main shutdown routine for the Admin Receive Queue
/* Mirror of i40e_shutdown_asq for the receive side: disable in HW,
 * then free buffers under the arq spinlock and mark count = 0. */
520 enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
522 enum i40e_status_code ret_code = I40E_SUCCESS;
524 if (hw->aq.arq.count == 0)
525 return I40E_ERR_NOT_READY;
527 /* Stop firmware AdminQ processing */
528 wr32(hw, hw->aq.arq.head, 0);
529 wr32(hw, hw->aq.arq.tail, 0);
530 wr32(hw, hw->aq.arq.len, 0);
531 wr32(hw, hw->aq.arq.bal, 0);
532 wr32(hw, hw->aq.arq.bah, 0);
534 /* make sure spinlock is available */
535 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
537 hw->aq.arq.count = 0; /* to indicate uninitialized queue */
539 /* free ring buffers */
540 i40e_free_arq_bufs(hw);
542 i40e_release_spinlock(&hw->aq.arq_spinlock);
548 * i40e_init_adminq - main initialization routine for Admin Queue
549 * @hw: pointer to the hardware structure
551 * Prior to calling this function, drivers *MUST* set the following fields
552 * in the hw->aq structure:
553 * - hw->aq.num_asq_entries
554 * - hw->aq.num_arq_entries
555 * - hw->aq.arq_buf_size
556 * - hw->aq.asq_buf_size
/* Top-level bring-up: spinlocks -> register offsets -> ASQ -> ARQ ->
 * (PF only) firmware handshake, NVM version read, API version check,
 * NVM resource release, and HMC profile setup.  Error paths unwind in
 * reverse order via the labelled gotos at the bottom. */
558 enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
560 enum i40e_status_code ret_code;
561 u16 eetrack_lo, eetrack_hi;
563 /* verify input for valid configuration */
564 if ((hw->aq.num_arq_entries == 0) ||
565 (hw->aq.num_asq_entries == 0) ||
566 (hw->aq.arq_buf_size == 0) ||
567 (hw->aq.asq_buf_size == 0)) {
568 ret_code = I40E_ERR_CONFIG;
569 goto init_adminq_exit;
572 /* initialize spin locks */
573 i40e_init_spinlock(&hw->aq.asq_spinlock);
574 i40e_init_spinlock(&hw->aq.arq_spinlock);
576 /* Set up register offsets */
577 i40e_adminq_init_regs(hw);
579 /* setup ASQ command write back timeout */
580 hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
582 /* allocate the ASQ */
583 ret_code = i40e_init_asq(hw);
584 if (ret_code != I40E_SUCCESS)
585 goto init_adminq_destroy_spinlocks;
587 /* allocate the ARQ */
588 ret_code = i40e_init_arq(hw);
589 if (ret_code != I40E_SUCCESS)
590 goto init_adminq_free_asq;
592 if (i40e_is_vf(hw)) /* VF has no need of firmware */
593 goto init_adminq_exit;
595 /* There are some cases where the firmware may not be quite ready
596 * for AdminQ operations, so we retry the AdminQ setup a few times
597 * if we see timeouts in this first AQ call.
/* retry loop: only AQ timeouts are retried; any other error breaks out.
 * NOTE(review): the loop/retry-increment lines are not visible in this
 * fragment — confirm against the full source. */
600 ret_code = i40e_aq_get_firmware_version(hw,
606 if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
609 i40e_msec_delay(100);
611 } while (retry < 10);
612 if (ret_code != I40E_SUCCESS)
613 goto init_adminq_free_arq;
615 /* get the NVM version info */
616 i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
617 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
618 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
619 hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
/* newer-major FW API than the driver was built for is a hard error */
621 if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
622 ret_code = I40E_ERR_FIRMWARE_API_VERSION;
623 goto init_adminq_free_arq;
626 /* pre-emptive resource lock release */
627 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
628 hw->aq.nvm_busy = FALSE;
/* best-effort: a failed HMC profile set is overwritten with SUCCESS */
630 ret_code = i40e_aq_set_hmc_resource_profile(hw,
631 I40E_HMC_PROFILE_DEFAULT,
634 ret_code = I40E_SUCCESS;
637 goto init_adminq_exit;
639 init_adminq_free_arq:
640 i40e_shutdown_arq(hw);
641 init_adminq_free_asq:
642 i40e_shutdown_asq(hw);
643 init_adminq_destroy_spinlocks:
644 i40e_destroy_spinlock(&hw->aq.asq_spinlock);
645 i40e_destroy_spinlock(&hw->aq.arq_spinlock);
652 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
653 * @hw: pointer to the hardware structure
/* Politely tells firmware the queue is going down (if the ASQ is
 * still alive), then tears down both queues and their spinlocks. */
655 enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
657 enum i40e_status_code ret_code = I40E_SUCCESS;
659 if (i40e_check_asq_alive(hw))
660 i40e_aq_queue_shutdown(hw, TRUE);
662 i40e_shutdown_asq(hw);
663 i40e_shutdown_arq(hw);
665 /* destroy the spinlocks */
666 i40e_destroy_spinlock(&hw->aq.asq_spinlock);
667 i40e_destroy_spinlock(&hw->aq.arq_spinlock);
673 * i40e_clean_asq - cleans Admin send queue
674 * @hw: pointer to the hardware structure
676 * returns the number of free desc
/* Walks from next_to_clean up to the HW head pointer, invoking any
 * per-command completion callback (on a copy of the descriptor, since
 * the ring slot is zeroed immediately after), and zeroing consumed
 * slots.  Caller must hold the asq spinlock. */
678 u16 i40e_clean_asq(struct i40e_hw *hw)
680 struct i40e_adminq_ring *asq = &(hw->aq.asq);
681 struct i40e_asq_cmd_details *details;
682 u16 ntc = asq->next_to_clean;
683 struct i40e_aq_desc desc_cb;
684 struct i40e_aq_desc *desc;
686 desc = I40E_ADMINQ_DESC(*asq, ntc);
687 details = I40E_ADMINQ_DETAILS(*asq, ntc);
/* HW head == ntc means nothing new has been consumed by FW */
688 while (rd32(hw, hw->aq.asq.head) != ntc) {
689 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
690 "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
691 rd32(hw, hw->aq.asq.head));
693 if (details->callback) {
694 I40E_ADMINQ_CALLBACK cb_func =
695 (I40E_ADMINQ_CALLBACK)details->callback;
/* copy first: the ring descriptor is zeroed below */
696 i40e_memcpy(&desc_cb, desc,
697 sizeof(struct i40e_aq_desc), I40E_DMA_TO_DMA);
698 cb_func(hw, &desc_cb);
700 i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
701 i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
/* ring wrap (the reset-to-zero line is not visible in this fragment) */
703 if (ntc == asq->count)
705 desc = I40E_ADMINQ_DESC(*asq, ntc);
706 details = I40E_ADMINQ_DETAILS(*asq, ntc);
709 asq->next_to_clean = ntc;
711 return I40E_DESC_UNUSED(asq);
715 * i40e_asq_done - check if FW has processed the Admin Send Queue
716 * @hw: pointer to the hw struct
718 * Returns TRUE if the firmware has processed all descriptors on the
719 * admin send queue. Returns FALSE if there are still requests pending.
721 bool i40e_asq_done(struct i40e_hw *hw)
723 /* AQ designers suggest use of head for better
724 * timing reliability than DD bit
/* queue is drained when the HW head has caught up with SW tail */
726 return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
731 * i40e_asq_send_command - send command to Admin Queue
732 * @hw: pointer to the hw struct
733 * @desc: prefilled descriptor describing the command (non DMA mem)
734 * @buff: buffer to use for indirect commands
735 * @buff_size: size of buffer for indirect commands
736 * @cmd_details: pointer to command details structure
738 * This is the main send command driver routine for the Admin Queue send
739 * queue. It runs the queue, cleans the queue, etc
/* On success the FW writeback is copied back into @desc (and @buff for
 * indirect commands), and hw->aq.asq_last_status holds the AQ return
 * code.  Synchronous unless details->async/postpone are set. */
741 enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
742 struct i40e_aq_desc *desc,
743 void *buff, /* can be NULL */
745 struct i40e_asq_cmd_details *cmd_details)
747 enum i40e_status_code status = I40E_SUCCESS;
748 struct i40e_dma_mem *dma_buff = NULL;
749 struct i40e_asq_cmd_details *details;
750 struct i40e_aq_desc *desc_on_ring;
751 bool cmd_completed = FALSE;
/* sanity: a head value beyond the ring size means HW/driver state is
 * corrupt — bail before touching the ring */
755 val = rd32(hw, hw->aq.asq.head);
756 if (val >= hw->aq.num_asq_entries) {
757 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
758 "AQTX: head overrun at %d\n", val);
759 status = I40E_ERR_QUEUE_EMPTY;
760 goto asq_send_command_exit;
763 if (hw->aq.asq.count == 0) {
764 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
765 "AQTX: Admin queue not initialized.\n");
766 status = I40E_ERR_QUEUE_EMPTY;
767 goto asq_send_command_exit;
/* reject NVM-update commands while a previous one is still pending */
770 if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
771 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
772 status = I40E_ERR_NVM;
773 goto asq_send_command_exit;
/* per-slot details: either a copy of caller's cmd_details or zeroed */
776 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
780 sizeof(struct i40e_asq_cmd_details),
781 I40E_NONDMA_TO_NONDMA);
783 /* If the cmd_details are defined copy the cookie. The
784 * CPU_TO_LE32 is not needed here because the data is ignored
785 * by the FW, only used by the driver
787 if (details->cookie) {
789 CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
791 CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
794 i40e_memset(details, 0,
795 sizeof(struct i40e_asq_cmd_details),
799 /* clear requested flags and then set additional flags if defined */
800 desc->flags &= ~CPU_TO_LE16(details->flags_dis);
801 desc->flags |= CPU_TO_LE16(details->flags_ena);
/* ring is exclusive from here until the error/exit labels */
803 i40e_acquire_spinlock(&hw->aq.asq_spinlock);
805 if (buff_size > hw->aq.asq_buf_size) {
807 I40E_DEBUG_AQ_MESSAGE,
808 "AQTX: Invalid buffer size: %d.\n",
810 status = I40E_ERR_INVALID_SIZE;
811 goto asq_send_command_error;
/* postpone only makes sense for async commands */
814 if (details->postpone && !details->async) {
816 I40E_DEBUG_AQ_MESSAGE,
817 "AQTX: Async flag not set along with postpone flag");
818 status = I40E_ERR_PARAM;
819 goto asq_send_command_error;
822 /* call clean and check queue available function to reclaim the
823 * descriptors that were processed by FW, the function returns the
824 * number of desc available
826 /* the clean function called here could be called in a separate thread
827 * in case of asynchronous completions
829 if (i40e_clean_asq(hw) == 0) {
831 I40E_DEBUG_AQ_MESSAGE,
832 "AQTX: Error queue is full.\n");
833 status = I40E_ERR_ADMIN_QUEUE_FULL;
834 goto asq_send_command_error;
837 /* initialize the temp desc pointer with the right desc */
838 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
840 /* if the desc is available copy the temp desc to the right place */
841 i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
844 /* if buff is not NULL assume indirect command */
846 dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
847 /* copy the user buff into the respective DMA buff */
848 i40e_memcpy(dma_buff->va, buff, buff_size,
850 desc_on_ring->datalen = CPU_TO_LE16(buff_size);
852 /* Update the address values in the desc with the pa value
853 * for respective buffer
855 desc_on_ring->params.external.addr_high =
856 CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
857 desc_on_ring->params.external.addr_low =
858 CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
862 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
863 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
/* advance SW tail; writing it to HW doorbell actually submits */
865 (hw->aq.asq.next_to_use)++;
866 if (hw->aq.asq.next_to_use == hw->aq.asq.count)
867 hw->aq.asq.next_to_use = 0;
868 if (!details->postpone)
869 wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
871 /* if cmd_details are not defined or async flag is not set,
872 * we need to wait for desc write back
874 if (!details->async && !details->postpone) {
878 /* AQ designers suggest use of head for better
879 * timing reliability than DD bit
881 if (i40e_asq_done(hw))
883 /* ugh! delay while spin_lock */
886 } while (total_delay < hw->aq.asq_cmd_timeout);
889 /* if ready, copy the desc back to temp */
890 if (i40e_asq_done(hw)) {
891 i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
894 i40e_memcpy(buff, dma_buff->va, buff_size,
896 retval = LE16_TO_CPU(desc->retval);
899 I40E_DEBUG_AQ_MESSAGE,
900 "AQTX: Command completed with error 0x%X.\n",
903 /* strip off FW internal code */
906 cmd_completed = TRUE;
907 if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
908 status = I40E_SUCCESS;
910 status = I40E_ERR_ADMIN_QUEUE_ERROR;
911 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
914 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
915 "AQTX: desc and buffer writeback:\n");
916 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
918 /* update the error if time out occurred */
919 if ((!cmd_completed) &&
920 (!details->async && !details->postpone)) {
922 I40E_DEBUG_AQ_MESSAGE,
923 "AQTX: Writeback timeout.\n");
924 status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
/* mark NVM busy only after a successful submit; cleared again when the
 * completion event arrives in i40e_clean_arq_element() */
927 if (!status && i40e_is_nvm_update_op(desc))
928 hw->aq.nvm_busy = TRUE;
930 asq_send_command_error:
931 i40e_release_spinlock(&hw->aq.asq_spinlock);
932 asq_send_command_exit:
937 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
938 * @desc: pointer to the temp descriptor (non DMA mem)
939 * @opcode: the opcode can be used to decide which flags to turn off or on
941 * Fill the desc with default values
/* Zeroes the whole descriptor, stamps the opcode (little-endian) and
 * sets the default SI (status-interrupt) flag. */
943 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
946 /* zero out the desc */
947 i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
949 desc->opcode = CPU_TO_LE16(opcode);
950 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
954 * i40e_clean_arq_element
955 * @hw: pointer to the hw struct
956 * @e: event info from the receive descriptor, includes any buffers
957 * @pending: number of events that could be left to process
959 * This function cleans one Admin Receive Queue element and returns
960 * the contents through e. It can also return how many events are
961 * left to process through 'pending'
/* Consumes exactly one event: copies descriptor + message into @e,
 * re-arms the ring slot with its original buffer, bumps the HW tail,
 * and (outside the lock) clears nvm_busy / releases the NVM resource
 * when the event completes an NVM update command. */
963 enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
964 struct i40e_arq_event_info *e,
967 enum i40e_status_code ret_code = I40E_SUCCESS;
968 u16 ntc = hw->aq.arq.next_to_clean;
969 struct i40e_aq_desc *desc;
970 struct i40e_dma_mem *bi;
976 /* take the lock before we start messing with the ring */
977 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
979 /* set next_to_use to head */
980 ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
/* ntu == ntc: ring empty, no event to consume */
982 /* nothing to do - shouldn't need to update ring's values */
983 ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
984 goto clean_arq_element_out;
987 /* now clean the next descriptor */
988 desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc)
991 flags = LE16_TO_CPU(desc->flags);
/* the event itself may carry a FW error; record it but still deliver */
992 if (flags & I40E_AQ_FLAG_ERR) {
993 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
994 hw->aq.arq_last_status =
995 (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
997 I40E_DEBUG_AQ_MESSAGE,
998 "AQRX: Event received with error 0x%X.\n",
999 hw->aq.arq_last_status);
1002 i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
1003 I40E_DMA_TO_NONDMA);
/* clamp copy length to the caller's buffer to avoid overrun */
1004 datalen = LE16_TO_CPU(desc->datalen);
1005 e->msg_len = min(datalen, e->buf_len);
1006 if (e->msg_buf != NULL && (e->msg_len != 0))
1007 i40e_memcpy(e->msg_buf,
1008 hw->aq.arq.r.arq_bi[desc_idx].va,
1009 e->msg_len, I40E_DMA_TO_NONDMA);
1011 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
1012 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
1013 hw->aq.arq_buf_size);
1015 /* Restore the original datalen and buffer address in the desc,
1016 * FW updates datalen to indicate the event message
1019 bi = &hw->aq.arq.r.arq_bi[ntc];
1020 i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
1022 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
1023 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
1024 desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
1025 desc->datalen = CPU_TO_LE16((u16)bi->size);
1026 desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
1027 desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
1029 /* set tail = the last cleaned desc index. */
1030 wr32(hw, hw->aq.arq.tail, ntc);
1031 /* ntc is updated to tail + 1 */
/* wrap ntc (the increment line is not visible in this fragment) */
1033 if (ntc == hw->aq.num_arq_entries)
1035 hw->aq.arq.next_to_clean = ntc;
1036 hw->aq.arq.next_to_use = ntu;
1038 clean_arq_element_out:
1039 /* Set pending if needed, unlock and return */
1040 if (pending != NULL)
1041 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
1042 i40e_release_spinlock(&hw->aq.arq_spinlock);
/* NVM completion handling deliberately happens after the unlock */
1044 if (i40e_is_nvm_update_op(&e->desc)) {
1045 hw->aq.nvm_busy = FALSE;
1046 if (hw->aq.nvm_release_on_done) {
1047 i40e_release_nvm(hw);
1048 hw->aq.nvm_release_on_done = FALSE;
/* Re-programs both AQ register sets after a PF reset wiped them.
 * Assumes the rings and buffers are still allocated; only the SW
 * indices and HW registers need resetting. */
1055 void i40e_resume_aq(struct i40e_hw *hw)
1057 /* Registers are reset after PF reset */
1058 hw->aq.asq.next_to_use = 0;
1059 hw->aq.asq.next_to_clean = 0;
/* compile-time guard: config_asq_regs uses the PF enable mask for
 * both PF and VF, which is only valid if the masks are identical */
1061 #if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
1062 #error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
1064 i40e_config_asq_regs(hw);
1066 hw->aq.arq.next_to_use = 0;
1067 hw->aq.arq.next_to_clean = 0;
1069 i40e_config_arq_regs(hw);