/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "ena_com.h"
#include "ena_gen_info.h"
/*****************************************************************************/
/*****************************************************************************/
/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))
#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1
#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))
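
/* MIN_ENA_CTRL_VER packs major/minor/sub-minor into one word using the
 * same field shifts as the controller version register, so a plain
 * integer compare in ena_com_validate_version() orders versions
 * correctly (the implementation ID is masked out before the compare).
 */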
#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};
struct ena_comp_ctx {
	ena_wait_event_t wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};
struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err("dma address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = (u32)addr;
	ena_addr->mem_addr_high = (u64)addr >> 32;

	return 0;
}
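
/* Illustrative note (values are examples only): for a hypothetical
 * 48-bit device and addr = 0x123456789ABC, the helper above stores
 * mem_addr_low = 0x56789ABC and mem_addr_high = 0x1234, while any
 * address with bits set at or above dma_addr_bits is rejected with
 * ENA_COM_INVAL.
 */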
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
			       sq->mem_handle);

	if (!sq->entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
			       cq->mem_handle);

	if (!cq->entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
			       aenq->entries,
			       aenq->dma_addr,
			       aenq->mem_handle);

	if (!aenq->entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		ena_trc_err("aenq handlers pointer is NULL\n");
		return ENA_COM_INVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}
static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	ATOMIC32_DEC(&queue->outstanding_cmds);
}
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
			    command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		ena_trc_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		ATOMIC32_INC(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}
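
/* Note on the lifecycle above: a context is "captured" on submission
 * (occupied = true, outstanding_cmds incremented) and released again in
 * comp_ctxt_release() once the caller has consumed the completion, so
 * command_id doubles as a direct index into queue->comp_ctx[].
 */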
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = admin_queue->sq.tail - admin_queue->sq.head;
	if (cnt >= admin_queue->q_depth) {
		ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
			    admin_queue->sq.tail,
			    admin_queue->sq.head,
			    admin_queue->q_depth);
		admin_queue->stats.out_of_space++;
		return ERR_PTR(ENA_COM_NO_SPACE);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(ENA_COM_INVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
			admin_queue->sq.db_addr);

	return comp_ctx;
}
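
/* The submission path above relies on q_depth being a power of two:
 * tail is free-running, tail & (q_depth - 1) is the ring slot, and the
 * phase bit flips on every wrap so the device can tell fresh
 * descriptors from stale ones without a separate valid flag.
 */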
static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
	if (unlikely(!queue->comp_ctx)) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
	}

	return 0;
}
static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags;
	struct ena_comp_ctx *comp_ctx;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		return ERR_PTR(ENA_COM_NO_DEVICE);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (unlikely(IS_ERR(comp_ctx)))
		admin_queue->running_state = false;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

	return comp_ctx;
}
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;
	io_sq->bus = ena_dev->bus;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
					    size,
					    io_sq->desc_addr.virt_addr,
					    io_sq->desc_addr.phys_addr,
					    io_sq->desc_addr.mem_handle,
					    ctx->numa_node,
					    dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
					       size,
					       io_sq->desc_addr.virt_addr,
					       io_sq->desc_addr.phys_addr,
					       io_sq->desc_addr.mem_handle);
		}
	} else {
		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
				   size,
				   io_sq->desc_addr.virt_addr,
				   ctx->numa_node,
				   dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				ENA_MEM_ALLOC(ena_dev->dmadev, size);
		}
	}

	if (!io_sq->desc_addr.virt_addr) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
	io_cq->bus = ena_dev->bus;

	ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
				    size,
				    io_cq->cdesc_addr.virt_addr,
				    io_cq->cdesc_addr.phys_addr,
				    io_cq->cdesc_addr.mem_handle,
				    ctx->numa_node,
				    prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
				       size,
				       io_cq->cdesc_addr.virt_addr,
				       io_cq->cdesc_addr.phys_addr,
				       io_cq->cdesc_addr.mem_handle);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}
static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((cqe->acq_common_descriptor.flags &
			ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}
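
/* Completion polling above uses the same phase convention as submission:
 * an entry belongs to the current pass only while its phase bit matches
 * cq.phase, and the expected phase is inverted each time head wraps, so
 * the ring never needs to be zeroed between passes.
 */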
static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		ena_trc_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return ENA_COM_INVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return ENA_COM_NO_MEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return ENA_COM_PERMISSION;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return ENA_COM_INVAL;
	}

	return 0;
}
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags, timeout;
	int ret;

	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);

	while (1) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (ENA_TIME_EXPIRE(timeout)) {
			ena_trc_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}

		ENA_MSLEEP(100);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		ena_trc_err("Command was aborted\n");
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ret = ENA_COM_NO_DEVICE;
		goto err;
	}

	ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
		 "Invalid comp status %d\n", comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	int ret;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    admin_queue->completion_timeout);

	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't get any MSI-X interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			ena_trc_err("The ena device sent a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
				    comp_ctx->cmd_opcode);
		else
			ena_trc_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
				    comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = ENA_COM_TIMER_EXPIRED;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/* This method reads a hardware device register through posting writes
 * and waiting for a response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, timeout, ret;
	unsigned long flags;
	int i;

	ENA_MIGHT_SLEEP();

	timeout = mmio_read->reg_read_to ? : ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id gets updated before the hw can write
	 * there
	 */
	wmb();

	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		ENA_UDELAY(1);
	}

	if (unlikely(i == timeout)) {
		ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
			    mmio_read->seq_num,
			    offset,
			    read_resp->req_id,
			    read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		ena_trc_err("Read failure: wrong offset provided");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

	return ret;
}
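
/* Readless reads exist because a PCIe register read can stall the CPU
 * when the device is unresponsive. Instead, the driver posts the offset
 * plus a sequence number to ENA_REGS_MMIO_REG_READ_OFF and the device
 * DMA-writes the value into read_resp; seeding req_id with
 * seq_num + 0xDEAD makes a stale response from a previous request
 * detectable.
 */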
/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}
static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}
static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
			ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
					      size,
					      io_sq->desc_addr.virt_addr,
					      io_sq->desc_addr.phys_addr,
					      io_sq->desc_addr.mem_handle);
		else
			ENA_MEM_FREE(ena_dev->dmadev, io_sq->desc_addr.virt_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			ena_trc_err("Reg read timeout occurred\n");
			return ENA_COM_TIMER_EXPIRED;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		/* The resolution of the timeout is 100ms */
		ENA_MSLEEP(100);
	}

	return ENA_COM_TIMER_EXPIRED;
}
static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes is always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		ena_trc_dbg("Feature %d isn't supported\n", feature_id);
		return ENA_COM_PERMISSION;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to submit get_feature command %d error: %d\n",
			    feature_id, ret);

	return ret;
}
static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_key),
			       rss->hash_key,
			       rss->hash_key_dma_addr,
			       rss->hash_key_mem_handle);

	if (unlikely(!rss->hash_key))
		return ENA_COM_NO_MEM;

	return 0;
}
static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_key),
				      rss->hash_key,
				      rss->hash_key_dma_addr,
				      rss->hash_key_mem_handle);
	rss->hash_key = NULL;
}
static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_ctrl),
			       rss->hash_ctrl,
			       rss->hash_ctrl_dma_addr,
			       rss->hash_ctrl_mem_handle);

	if (unlikely(!rss->hash_ctrl))
		return ENA_COM_NO_MEM;

	return 0;
}
static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_ctrl),
				      rss->hash_ctrl,
				      rss->hash_ctrl_dma_addr,
				      rss->hash_ctrl_mem_handle);
	rss->hash_ctrl = NULL;
}
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
			    1 << log_size,
			    1 << get_resp.u.ind_table.min_size,
			    1 << get_resp.u.ind_table.max_size);
		return ENA_COM_INVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       tbl_size,
			       rss->rss_ind_tbl,
			       rss->rss_ind_tbl_dma_addr,
			       rss->rss_ind_tbl_mem_handle);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      tbl_size,
			      rss->rss_ind_tbl,
			      rss->rss_ind_tbl_dma_addr,
			      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return ENA_COM_NO_MEM;
}
static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      tbl_size,
				      rss->rss_ind_tbl,
				      rss->rss_ind_tbl_dma_addr,
				      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}
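
/* Sizing note for the two tables above: the device-side table holds
 * (1 << tbl_log_size) entries of struct ena_admin_rss_ind_table_entry in
 * DMA-coherent memory, while the host shadow (host_rss_ind_tbl) only
 * needs a u16 queue id per slot; e.g. log_size = 7 gives a 128-entry
 * table.
 */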
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			ena_trc_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return ENA_COM_INVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}
static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}
static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size;

	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
	if (!ena_dev->intr_moder_tbl)
		return ENA_COM_NO_MEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}
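
/* Example of the conversion above: with a device-reported resolution of
 * 4 usec per unit, a table entry of 64 usec becomes 64 / 4 = 16 device
 * units, so intervals end up stored in the granularity the hardware
 * expects.
 */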
/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (unlikely(IS_ERR(comp_ctx))) {
		if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
			ena_trc_dbg("Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));
		else
			ena_trc_err("Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			ena_trc_err("Failed to process command. ret = %d\n",
				    ret);
		else
			ena_trc_dbg("Failed to process command. ret = %d\n",
				    ret);
	}
	return ret;
}
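
/* Minimal usage sketch for the API above (hypothetical caller, error
 * handling and command payload elided):
 *
 *	struct ena_admin_aq_entry cmd = { 0 };
 *	struct ena_admin_acq_entry comp = { 0 };
 *
 *	cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
 *	rc = ena_com_execute_admin_command(&ena_dev->admin_queue,
 *					   &cmd, sizeof(cmd),
 *					   &comp, sizeof(comp));
 */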
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Invalid queue number %d but the max is %d\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
	}
}
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ENA_MSLEEP(20);
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	}
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		ena_trc_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			     get_resp.u.aenq.supported_groups,
			     groups_flag);
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	u32 width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	ena_trc_dbg("ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		ena_trc_err("DMA width illegal value: %d\n", width);
		return ENA_COM_INVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}
int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as new as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	ena_trc_info("ena device version: %d.%d\n",
		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
		return -1;
	}

	ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);

	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);

	if (admin_queue->comp_ctx)
		ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);

	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
				      sq->dma_addr, sq->mem_handle);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
				      cq->dma_addr, cq->mem_handle);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
				      aenq->dma_addr, aenq->mem_handle);
	aenq->entries = NULL;
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	ena_dev->admin_queue.polling = polling;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_SPINLOCK_INIT(mmio_read->lock);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*mmio_read->read_resp),
			       mmio_read->read_resp,
			       mmio_read->read_resp_dma_addr,
			       mmio_read->read_resp_mem_handle);
	if (unlikely(!mmio_read->read_resp))
		return ENA_COM_NO_MEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}
void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      sizeof(*mmio_read->read_resp),
			      mmio_read->read_resp,
			      mmio_read->read_resp_dma_addr,
			      mmio_read->read_resp_mem_handle);

	mmio_read->read_resp = NULL;

	ENA_SPINLOCK_DESTROY(mmio_read->lock);
}
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	ena_trc_info("ena_defs : Version:[%s] Build date [%s]",
		     ENA_GEN_COMMIT, ENA_GEN_DATE);

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		ena_trc_err("Device isn't ready, abort com init\n");
		return ENA_COM_NO_DEVICE;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->bus = ena_dev->bus;
	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		ENA_SPINLOCK_INIT(admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
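
/* Typical bring-up order, condensed from how callers commonly use this
 * API (a sketch, error handling elided; the exact sequence is driver
 * specific):
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	rc = ena_com_validate_version(ena_dev);
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
 *	ena_com_set_admin_polling_mode(ena_dev, false);
 */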
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}
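
/* Illustrative caller sketch for the API above (field values are
 * examples only):
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.direction = ENA_COM_IO_QUEUE_DIRECTION_RX,
 *		.msix_vector = vector,
 *		.qid = qid,
 *		.queue_size = 1024,
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 */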
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command. So in case the
	 * command isn't supported, set driver hints to 0
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == ENA_COM_PERMISSION)
		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	return 0;
}
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}
/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}
/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
		phase) {
		ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%jus]\n",
			    aenq_common->group,
			    aenq_common->syndrom,
			    (u64)aenq_common->timestamp_low +
			    ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event*/
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler*/

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err("Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
			ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err("Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}
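
/* The reset handshake above is deliberately two-phased: the driver first
 * waits for RESET_IN_PROGRESS to assert (the device accepted the reset),
 * then clears DEV_CTL and waits for the bit to deassert (reset finished).
 * The admin timeout is re-derived from the capability register because
 * the value is reported in 100 ms units (hence timeout * 100000 usec).
 */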
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to get stats. error: %d\n", ret);

	return ret;
}
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		ena_trc_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_PERMISSION;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
		ena_trc_err("Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err("Flow hash function %d isn't supported\n", func);
		return ENA_COM_PERMISSION;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
				    key_len, sizeof(hash_key->key));
			return ENA_COM_INVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		ena_trc_err("Invalid hash function (%d)\n", func);
		return ENA_COM_INVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}
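
/* Key layout note for the function above: keys_num counts 32-bit words,
 * so the usual 40-byte Toeplitz key is stored as keys_num = 40 >> 2 = 10;
 * the key_len & 0x3 check rejects keys that are not a whole number of
 * words.
 */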
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}
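/* The L3/L4 input-sort masks set in ena_com_set_hash_ctrl() request that the
 * device order the source/destination address and port pairs before hashing.
 * The practical effect is a symmetric hash: both directions of the same flow
 * should land on the same Rx queue regardless of which endpoint appears as
 * source.
 */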
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_PERMISSION;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err("Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
		return ENA_COM_PERMISSION;
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
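/* Usage sketch (illustrative, not part of the ena_com API): restrict IPv4/TCP
 * hashing to a 2-tuple, i.e. hash on the addresses only and ignore the ports.
 */
static inline int ena_example_tcp4_2tuple(struct ena_com_dev *ena_dev)
{
	return ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4,
				      ENA_ADMIN_RSS_L3_SA |
				      ENA_ADMIN_RSS_L3_DA);
}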
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}
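/* Usage sketch (illustrative): spread the indirection table round-robin
 * across num_queues Rx queues. The round-robin mapping and the num_queues
 * parameter are hypothetical; a real driver may map values through its own
 * queue-id scheme before ena_com_ind_tbl_convert_to_device() runs.
 */
static inline int ena_example_fill_ind_tbl(struct ena_com_dev *ena_dev,
					   u16 tbl_size, u16 num_queues)
{
	u16 i;
	int rc;

	for (i = 0; i < tbl_size; i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       i % num_queues);
		if (rc)
			return rc;
	}

	return 0;
}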
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return ENA_COM_PERMISSION;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		ena_trc_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}
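/* inline_index is set to 0xFFFFFFFF above so the device takes the whole
 * table from the DMA control buffer rather than updating a single inline
 * entry; this reading of the field follows from the command layout used
 * here (full-table length in control_buffer.length).
 */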
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (u32)(1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}
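/* Usage sketch (illustrative): a typical RSS bring-up order for a driver.
 * The 128-entry table size, the num_queues parameter, and the
 * ena_example_fill_ind_tbl() helper (defined above) are hypothetical
 * stand-ins, not part of the ena_com API.
 */
static inline int ena_example_rss_setup(struct ena_com_dev *ena_dev,
					u16 num_queues)
{
	const u16 log_size = 7;		/* 128-entry indirection table */
	int rc;

	rc = ena_com_rss_init(ena_dev, log_size);
	if (rc)
		return rc;

	rc = ena_example_fill_ind_tbl(ena_dev, 1 << log_size, num_queues);
	if (!rc)
		rc = ena_com_indirect_table_set(ena_dev);
	if (!rc)
		rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (rc)
		ena_com_rss_destroy(ena_dev);

	return rc;
}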
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	/* the host info buffer occupies a single 4 KB page */
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       SZ_4K,
			       host_attr->host_info,
			       host_attr->host_info_dma_addr,
			       host_attr->host_info_dma_handle);
	if (unlikely(!host_attr->host_info))
		return ENA_COM_NO_MEM;

	return 0;
}
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       debug_area_size,
			       host_attr->debug_area_virt_addr,
			       host_attr->debug_area_dma_addr,
			       host_attr->debug_area_dma_handle);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return ENA_COM_NO_MEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		/* free the single 4 KB page allocated above */
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      SZ_4K,
				      host_attr->host_info,
				      host_attr->host_info_dma_addr,
				      host_attr->host_info_dma_handle);
		host_attr->host_info = NULL;
	}
}
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      host_attr->debug_area_size,
				      host_attr->debug_area_virt_addr,
				      host_attr->debug_area_dma_addr,
				      host_attr->debug_area_dma_handle);
		host_attr->debug_area_virt_addr = NULL;
	}
}
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set host attributes: %d\n", ret);

	return ret;
}
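/* Usage sketch (illustrative): the expected ordering during device init.
 * As noted above, host attributes may be pushed before
 * ena_com_get_dev_attr_feat(), so both buffers must already be allocated
 * and populated; the "fill host_info" step is a driver responsibility and
 * is only hinted at here.
 */
static inline int ena_example_host_attr_setup(struct ena_com_dev *ena_dev,
					      u32 debug_area_size)
{
	int rc;

	rc = ena_com_allocate_host_info(ena_dev);
	if (rc)
		return rc;

	rc = ena_com_allocate_debug_area(ena_dev, debug_area_size);
	if (rc)
		goto err_host_info;

	/* ... driver fills ena_dev->host_attr.host_info fields here ... */

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc)
		goto err_debug_area;

	return 0;

err_debug_area:
	ena_com_delete_debug_area(ena_dev);
err_host_info:
	ena_com_delete_host_info(ena_dev);
	return rc;
}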
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	/* We use LOWEST entry of moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}
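/* Worked example: intr_delay_resolution is the device's coalescing unit in
 * microseconds. With a (hypothetical) resolution of 32 us, a request for
 * 96 us stores an interval of 96 / 32 = 3; a request below the resolution
 * truncates to 0, effectively disabling moderation for that direction.
 */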
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);

	ena_dev->intr_moder_tbl = NULL;
}
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == ENA_COM_PERMISSION) {
			ena_trc_dbg("Feature %d isn't supported\n",
				    ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
				    rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}
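/* Call-order note: this init issues a get-feature admin command, so it must
 * run only after the admin queue is functional; the error path above tears
 * the moderation table back down via
 * ena_com_destroy_interrupt_moderation(), which is also the teardown entry
 * point on device removal.
 */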
void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}
unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}
void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;

	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}
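/* Unit note: ena_com_init_intr_moderation_entry() converts the caller's
 * microsecond interval into device units (dividing by intr_delay_resolution)
 * while ena_com_get_intr_moderation_entry() below multiplies back, so a
 * set/get round-trip can lose up to (intr_delay_resolution - 1) microseconds
 * to integer truncation.
 */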
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;

	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}