/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
#ifdef ENA_EXTENDED_STATS

#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)

#endif /* ENA_EXTENDED_STATS */
#define ENA_CTRL_MAJOR 0
#define ENA_CTRL_MINOR 0
#define ENA_CTRL_SUB_MINOR 1

#define MIN_ENA_CTRL_VER \
    (((ENA_CTRL_MAJOR) << \
    (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
    ((ENA_CTRL_MINOR) << \
    (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
    (ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)  ((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_POLL_MS 5
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
    ENA_CMD_SUBMITTED,
    ENA_CMD_COMPLETED,
    /* Abort - canceled by the driver */
    ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
    ena_wait_event_t wait_event;
    struct ena_admin_acq_entry *user_cqe;
    u32 comp_size;
    enum ena_cmd_status status;
    /* status from the device */
    u8 comp_status;
    u8 cmd_opcode;
    bool occupied;
};

struct ena_com_stats_ctx {
    struct ena_admin_aq_get_stats_cmd get_cmd;
    struct ena_admin_acq_get_stats_resp get_resp;
};
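
/* Admin command lifecycle, as a summary of the code below: a caller builds an
 * ena_admin_aq_entry, __ena_com_submit_admin_cmd() copies it into the admin
 * submission queue and binds it to a per-command-id ena_comp_ctx, and the
 * completion path (polling or MSI-X) fills comp_status/user_cqe and signals
 * wait_event before the context is released via comp_ctxt_release().
 */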
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
                                       struct ena_common_mem_addr *ena_addr,
                                       dma_addr_t addr)
{
    if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
        ena_trc_err("dma address has more bits than the device supports\n");
        return ENA_COM_INVAL;
    }

    ena_addr->mem_addr_low = lower_32_bits(addr);
    ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

    return 0;
}
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
    struct ena_com_admin_sq *sq = &queue->sq;
    u16 size = ADMIN_SQ_SIZE(queue->q_depth);

    ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
                           sq->mem_handle);

    if (!sq->entries) {
        ena_trc_err("memory allocation failed\n");
        return ENA_COM_NO_MEM;
    }

    sq->head = 0;
    sq->tail = 0;
    sq->phase = 1;

    sq->db_addr = NULL;

    return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
    struct ena_com_admin_cq *cq = &queue->cq;
    u16 size = ADMIN_CQ_SIZE(queue->q_depth);

    ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
                           cq->mem_handle);

    if (!cq->entries) {
        ena_trc_err("memory allocation failed\n");
        return ENA_COM_NO_MEM;
    }

    cq->head = 0;
    cq->phase = 1;

    return 0;
}
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
                                   struct ena_aenq_handlers *aenq_handlers)
{
    struct ena_com_aenq *aenq = &dev->aenq;
    u32 addr_low, addr_high, aenq_caps;
    u16 size;

    dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
    size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
    ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
                           aenq->entries,
                           aenq->dma_addr,
                           aenq->mem_handle);

    if (!aenq->entries) {
        ena_trc_err("memory allocation failed\n");
        return ENA_COM_NO_MEM;
    }

    aenq->head = aenq->q_depth;
    aenq->phase = 1;

    addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
    addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

    ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
    ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

    aenq_caps = 0;
    aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
    aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
        ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
        ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
    ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

    if (unlikely(!aenq_handlers)) {
        ena_trc_err("aenq handlers pointer is NULL\n");
        return ENA_COM_INVAL;
    }

    aenq->aenq_handlers = aenq_handlers;

    return 0;
}
static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
                                     struct ena_comp_ctx *comp_ctx)
{
    comp_ctx->occupied = false;
    ATOMIC32_DEC(&queue->outstanding_cmds);
}
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
                                          u16 command_id, bool capture)
{
    if (unlikely(command_id >= queue->q_depth)) {
        ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
                    command_id, queue->q_depth);
        return NULL;
    }

    if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
        ena_trc_err("Completion context is occupied\n");
        return NULL;
    }

    if (capture) {
        ATOMIC32_INC(&queue->outstanding_cmds);
        queue->comp_ctx[command_id].occupied = true;
    }

    return &queue->comp_ctx[command_id];
}
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
                                                       struct ena_admin_aq_entry *cmd,
                                                       size_t cmd_size_in_bytes,
                                                       struct ena_admin_acq_entry *comp,
                                                       size_t comp_size_in_bytes)
{
    struct ena_comp_ctx *comp_ctx;
    u16 tail_masked, cmd_id;
    u16 queue_size_mask;
    u16 cnt;

    queue_size_mask = admin_queue->q_depth - 1;

    tail_masked = admin_queue->sq.tail & queue_size_mask;

    /* In case of queue FULL */
    cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
    if (cnt >= admin_queue->q_depth) {
        ena_trc_dbg("admin queue is full.\n");
        admin_queue->stats.out_of_space++;
        return ERR_PTR(ENA_COM_NO_SPACE);
    }

    cmd_id = admin_queue->curr_cmd_id;

    cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
        ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

    cmd->aq_common_descriptor.command_id |= cmd_id &
        ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

    comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
    if (unlikely(!comp_ctx))
        return ERR_PTR(ENA_COM_INVAL);

    comp_ctx->status = ENA_CMD_SUBMITTED;
    comp_ctx->comp_size = (u32)comp_size_in_bytes;
    comp_ctx->user_cqe = comp;
    comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

    ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

    memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

    admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
        queue_size_mask;

    admin_queue->sq.tail++;
    admin_queue->stats.submitted_cmd++;

    if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
        admin_queue->sq.phase = !admin_queue->sq.phase;

    ENA_DB_SYNC(&admin_queue->sq.mem_handle);
    ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
                    admin_queue->sq.db_addr);

    return comp_ctx;
}
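
/* Note on the submission protocol above: the producer toggles sq.phase every
 * time the tail wraps the (power-of-two) queue, stamps the current phase into
 * each descriptor's flags, and only then rings the doorbell with the new tail.
 * The device uses the phase bit to tell freshly written descriptors from
 * stale ones left over from the previous pass over the ring.
 */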
static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
    size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
    struct ena_comp_ctx *comp_ctx;
    u16 i;

    queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
    if (unlikely(!queue->comp_ctx)) {
        ena_trc_err("memory allocation failed\n");
        return ENA_COM_NO_MEM;
    }

    for (i = 0; i < queue->q_depth; i++) {
        comp_ctx = get_comp_ctxt(queue, i, false);
        if (comp_ctx)
            ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
    }

    return 0;
}
static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
                                                     struct ena_admin_aq_entry *cmd,
                                                     size_t cmd_size_in_bytes,
                                                     struct ena_admin_acq_entry *comp,
                                                     size_t comp_size_in_bytes)
{
    unsigned long flags = 0;
    struct ena_comp_ctx *comp_ctx;

    ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
    if (unlikely(!admin_queue->running_state)) {
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
        return ERR_PTR(ENA_COM_NO_DEVICE);
    }
    comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
                                          cmd_size_in_bytes,
                                          comp,
                                          comp_size_in_bytes);
    if (IS_ERR(comp_ctx))
        admin_queue->running_state = false;
    ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

    return comp_ctx;
}
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                              struct ena_com_create_io_ctx *ctx,
                              struct ena_com_io_sq *io_sq)
{
    size_t size;
    int dev_node = 0;

    memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

    io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
    io_sq->desc_entry_size =
        (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
        sizeof(struct ena_eth_io_tx_desc) :
        sizeof(struct ena_eth_io_rx_desc);

    size = io_sq->desc_entry_size * io_sq->q_depth;
    io_sq->bus = ena_dev->bus;

    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
        ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
                                    size,
                                    io_sq->desc_addr.virt_addr,
                                    io_sq->desc_addr.phys_addr,
                                    io_sq->desc_addr.mem_handle,
                                    ctx->numa_node,
                                    dev_node);
        if (!io_sq->desc_addr.virt_addr) {
            ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                                   size,
                                   io_sq->desc_addr.virt_addr,
                                   io_sq->desc_addr.phys_addr,
                                   io_sq->desc_addr.mem_handle);
        }

        if (!io_sq->desc_addr.virt_addr) {
            ena_trc_err("memory allocation failed\n");
            return ENA_COM_NO_MEM;
        }
    }

    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
        /* Allocate bounce buffers */
        io_sq->bounce_buf_ctrl.buffer_size =
            ena_dev->llq_info.desc_list_entry_size;
        io_sq->bounce_buf_ctrl.buffers_num =
            ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
        io_sq->bounce_buf_ctrl.next_to_use = 0;

        size = io_sq->bounce_buf_ctrl.buffer_size *
            io_sq->bounce_buf_ctrl.buffers_num;

        ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
                           size,
                           io_sq->bounce_buf_ctrl.base_buffer,
                           ctx->numa_node,
                           dev_node);
        if (!io_sq->bounce_buf_ctrl.base_buffer)
            io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);

        if (!io_sq->bounce_buf_ctrl.base_buffer) {
            ena_trc_err("bounce buffer memory allocation failed\n");
            return ENA_COM_NO_MEM;
        }

        memcpy(&io_sq->llq_info, &ena_dev->llq_info,
               sizeof(io_sq->llq_info));

        /* Initiate the first bounce buffer */
        io_sq->llq_buf_ctrl.curr_bounce_buf =
            ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
        memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
               0x0, io_sq->llq_info.desc_list_entry_size);
        io_sq->llq_buf_ctrl.descs_left_in_line =
            io_sq->llq_info.descs_num_before_header;

        if (io_sq->llq_info.max_entries_in_tx_burst > 0)
            io_sq->entries_in_tx_burst_left =
                io_sq->llq_info.max_entries_in_tx_burst;
    }

    io_sq->tail = 0;
    io_sq->next_to_comp = 0;
    io_sq->phase = 1;

    return 0;
}
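
/* Design note on the LLQ path above: descriptors bound for device memory are
 * first staged in one of ENA_COM_BOUNCE_BUFFER_CNTRL_CNT host bounce buffers
 * (each desc_list_entry_size bytes) and copied out as whole entries, so the
 * device-memory window is only ever written in aligned, entry-sized bursts.
 */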
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
                              struct ena_com_create_io_ctx *ctx,
                              struct ena_com_io_cq *io_cq)
{
    size_t size;
    int prev_node = 0;

    memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

    /* Use the basic completion descriptor for Rx */
    io_cq->cdesc_entry_size_in_bytes =
        (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
        sizeof(struct ena_eth_io_tx_cdesc) :
        sizeof(struct ena_eth_io_rx_cdesc_base);

    size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
    io_cq->bus = ena_dev->bus;

    ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
                                size,
                                io_cq->cdesc_addr.virt_addr,
                                io_cq->cdesc_addr.phys_addr,
                                io_cq->cdesc_addr.mem_handle,
                                ctx->numa_node,
                                prev_node);
    if (!io_cq->cdesc_addr.virt_addr) {
        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               size,
                               io_cq->cdesc_addr.virt_addr,
                               io_cq->cdesc_addr.phys_addr,
                               io_cq->cdesc_addr.mem_handle);
    }

    if (!io_cq->cdesc_addr.virt_addr) {
        ena_trc_err("memory allocation failed\n");
        return ENA_COM_NO_MEM;
    }

    io_cq->phase = 1;
    io_cq->head = 0;

    return 0;
}
static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
                                                   struct ena_admin_acq_entry *cqe)
{
    struct ena_comp_ctx *comp_ctx;
    u16 cmd_id;

    cmd_id = cqe->acq_common_descriptor.command &
        ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

    comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
    if (unlikely(!comp_ctx)) {
        ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
        admin_queue->running_state = false;
        return;
    }

    comp_ctx->status = ENA_CMD_COMPLETED;
    comp_ctx->comp_status = cqe->acq_common_descriptor.status;

    if (comp_ctx->user_cqe)
        memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

    if (!admin_queue->polling)
        ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
    struct ena_admin_acq_entry *cqe = NULL;
    u16 comp_num = 0;
    u16 head_masked;
    u8 phase;

    head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
    phase = admin_queue->cq.phase;

    cqe = &admin_queue->cq.entries[head_masked];

    /* Go over all the completions */
    while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
            ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
        /* Do not read the rest of the completion entry before the
         * phase bit was validated
         */
        dma_rmb();
        ena_com_handle_single_admin_completion(admin_queue, cqe);

        head_masked++;
        comp_num++;
        if (unlikely(head_masked == admin_queue->q_depth)) {
            head_masked = 0;
            phase = !phase;
        }

        cqe = &admin_queue->cq.entries[head_masked];
    }

    admin_queue->cq.head += comp_num;
    admin_queue->cq.phase = phase;
    admin_queue->sq.head += comp_num;
    admin_queue->stats.completed_cmd += comp_num;
}
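
/* The completion queue uses the same phase-bit ownership scheme as the
 * submission side: an entry belongs to the driver only while its phase flag
 * matches cq.phase, and the expected phase flips on every wraparound, so no
 * head pointer ever needs to be read back from the device.
 */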
static int ena_com_comp_status_to_errno(u8 comp_status)
{
    if (unlikely(comp_status != 0))
        ena_trc_err("admin command failed[%u]\n", comp_status);

    if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
        return ENA_COM_INVAL;

    switch (comp_status) {
    case ENA_ADMIN_SUCCESS:
        return 0;
    case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
        return ENA_COM_NO_MEM;
    case ENA_ADMIN_UNSUPPORTED_OPCODE:
        return ENA_COM_UNSUPPORTED;
    case ENA_ADMIN_BAD_OPCODE:
    case ENA_ADMIN_MALFORMED_REQUEST:
    case ENA_ADMIN_ILLEGAL_PARAMETER:
    case ENA_ADMIN_UNKNOWN_ERROR:
        return ENA_COM_INVAL;
    }

    return 0;
}
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
                                                     struct ena_com_admin_queue *admin_queue)
{
    unsigned long flags = 0;
    unsigned long timeout;
    int ret;

    timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);

    while (1) {
        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        ena_com_handle_admin_completion(admin_queue);
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

        if (comp_ctx->status != ENA_CMD_SUBMITTED)
            break;

        if (ENA_TIME_EXPIRE(timeout)) {
            ena_trc_err("Wait for completion (polling) timeout\n");
            /* ENA didn't have any completion */
            ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
            admin_queue->stats.no_completion++;
            admin_queue->running_state = false;
            ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

            ret = ENA_COM_TIMER_EXPIRED;
            goto err;
        }

        ENA_MSLEEP(ENA_POLL_MS);
    }

    if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
        ena_trc_err("Command was aborted\n");
        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        admin_queue->stats.aborted_cmd++;
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
        ret = ENA_COM_NO_DEVICE;
        goto err;
    }

    ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
             "Invalid comp status %d\n", comp_ctx->status);

    ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
    comp_ctxt_release(admin_queue, comp_ctx);

    return ret;
}
/**
 * Set the LLQ configurations of the firmware
 *
 * The driver provides only the enabled feature values to the device,
 * which, in turn, checks if they are supported.
 */
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
    struct ena_com_admin_queue *admin_queue;
    struct ena_admin_set_feat_cmd cmd;
    struct ena_admin_set_feat_resp resp;
    struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
    int ret;

    memset(&cmd, 0x0, sizeof(cmd));
    admin_queue = &ena_dev->admin_queue;

    cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
    cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

    cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
    cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
    cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
    cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)&cmd,
                                        sizeof(cmd),
                                        (struct ena_admin_acq_entry *)&resp,
                                        sizeof(resp));

    if (unlikely(ret))
        ena_trc_err("Failed to set LLQ configurations: %d\n", ret);

    return ret;
}
static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
                                   struct ena_admin_feature_llq_desc *llq_features,
                                   struct ena_llq_configurations *llq_default_cfg)
{
    struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
    u16 supported_feat;
    int rc;

    memset(llq_info, 0, sizeof(*llq_info));

    supported_feat = llq_features->header_location_ctrl_supported;

    if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
        llq_info->header_location_ctrl =
            llq_default_cfg->llq_header_location;
    } else {
        ena_trc_err("Invalid header location control, supported: 0x%x\n",
                    supported_feat);
        return ENA_COM_INVAL;
    }

    if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
        supported_feat = llq_features->descriptors_stride_ctrl_supported;
        if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
            llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
        } else {
            if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
                llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
            } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
                llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
            } else {
                ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
                            supported_feat);
                return ENA_COM_INVAL;
            }

            ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
                        llq_default_cfg->llq_stride_ctrl,
                        supported_feat,
                        llq_info->desc_stride_ctrl);
        }
    } else {
        llq_info->desc_stride_ctrl = 0;
    }

    supported_feat = llq_features->entry_size_ctrl_supported;
    if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
        llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
        llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
    } else {
        if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
            llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
            llq_info->desc_list_entry_size = 128;
        } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
            llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
            llq_info->desc_list_entry_size = 192;
        } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
            llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
            llq_info->desc_list_entry_size = 256;
        } else {
            ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
            return ENA_COM_INVAL;
        }

        ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
                    llq_default_cfg->llq_ring_entry_size,
                    supported_feat,
                    llq_info->desc_list_entry_size);
    }

    if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
        /* The desc list entry size should be a whole multiple of 8.
         * This requirement comes from __iowrite64_copy()
         */
        ena_trc_err("illegal entry size %d\n",
                    llq_info->desc_list_entry_size);
        return ENA_COM_INVAL;
    }

    if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
        llq_info->descs_per_entry = llq_info->desc_list_entry_size /
            sizeof(struct ena_eth_io_tx_desc);
    else
        llq_info->descs_per_entry = 1;

    supported_feat = llq_features->desc_num_before_header_supported;
    if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
        llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
    } else {
        if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
            llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
        } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
            llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
        } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
            llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
        } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
            llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
        } else {
            ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",
                        supported_feat);
            return ENA_COM_INVAL;
        }

        ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
                    llq_default_cfg->llq_num_decs_before_header,
                    supported_feat,
                    llq_info->descs_num_before_header);
    }

    llq_info->max_entries_in_tx_burst =
        (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);

    rc = ena_com_set_llq(ena_dev);
    if (unlikely(rc))
        ena_trc_err("Cannot set LLQ configuration: %d\n", rc);

    return rc;
}
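
/* Illustrative only (the particular values are an assumption, not a
 * recommendation): a caller might fill ena_llq_configurations like this
 * before invoking ena_com_config_llq_info():
 *
 *	struct ena_llq_configurations llq_cfg = {
 *		.llq_header_location = ENA_ADMIN_INLINE_HEADER,
 *		.llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY,
 *		.llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B,
 *		.llq_ring_entry_size_value = 128,
 *		.llq_num_decs_before_header =
 *			ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2,
 *	};
 *
 * Each field is validated against the device's supported_feat masks, with
 * the fallbacks above applied when the default isn't supported.
 */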
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
                                                        struct ena_com_admin_queue *admin_queue)
{
    unsigned long flags = 0;
    int ret;

    ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
                        admin_queue->completion_timeout);

    /* In case the command wasn't completed, find out the root cause.
     * There might be 2 kinds of errors:
     * 1) No completion (timeout reached)
     * 2) There is a completion but the driver didn't receive any MSI-X interrupt.
     */
    if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        ena_com_handle_admin_completion(admin_queue);
        admin_queue->stats.no_completion++;
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

        if (comp_ctx->status == ENA_CMD_COMPLETED)
            ena_trc_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
                        comp_ctx->cmd_opcode);
        else
            ena_trc_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
                        comp_ctx->cmd_opcode, comp_ctx->status);

        admin_queue->running_state = false;
        ret = ENA_COM_TIMER_EXPIRED;
        goto err;
    }

    ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
    comp_ctxt_release(admin_queue, comp_ctx);

    return ret;
}
/* This method reads a hardware device register through posted writes
 * and waiting for a response.
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
    struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
    volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
        mmio_read->read_resp;
    u32 mmio_read_reg, ret, i;
    unsigned long flags = 0;
    u32 timeout = mmio_read->reg_read_to;

    ENA_MIGHT_SLEEP();

    if (timeout == 0)
        timeout = ENA_REG_READ_TIMEOUT;

    /* If readless is disabled, perform regular read */
    if (!mmio_read->readless_supported)
        return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

    ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
    mmio_read->seq_num++;

    read_resp->req_id = mmio_read->seq_num + 0xDEAD;
    mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
        ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
    mmio_read_reg |= mmio_read->seq_num &
        ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

    ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
                    ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

    for (i = 0; i < timeout; i++) {
        if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
            break;

        ENA_UDELAY(1);
    }

    if (unlikely(i == timeout)) {
        ena_trc_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
                    mmio_read->seq_num,
                    offset,
                    read_resp->req_id,
                    read_resp->reg_off);
        ret = ENA_MMIO_READ_TIMEOUT;
        goto err;
    }

    if (read_resp->reg_off != offset) {
        ena_trc_err("Read failure: wrong offset provided\n");
        ret = ENA_MMIO_READ_TIMEOUT;
    } else {
        ret = read_resp->reg_val;
    }
err:
    ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

    return ret;
}
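
/* Readless read sequence implemented above: (1) bump seq_num and poison the
 * DMA response buffer's req_id with seq_num + 0xDEAD, (2) post offset and
 * seq_num to the ENA_REGS_MMIO_REG_READ_OFF register, (3) poll the response
 * buffer until req_id matches seq_num, then take reg_val. This lets the
 * device push the register value into host memory instead of the driver
 * performing a slow PCI read.
 */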
/* There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on the wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
                                             struct ena_com_admin_queue *admin_queue)
{
    if (admin_queue->polling)
        return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
                                                         admin_queue);

    return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
                                                        admin_queue);
}
static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
                                 struct ena_com_io_sq *io_sq)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
    struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
    u8 direction;
    int ret;

    memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

    if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
        direction = ENA_ADMIN_SQ_DIRECTION_TX;
    else
        direction = ENA_ADMIN_SQ_DIRECTION_RX;

    destroy_cmd.sq.sq_identity |= (direction <<
        ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
        ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

    destroy_cmd.sq.sq_idx = io_sq->idx;
    destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)&destroy_cmd,
                                        sizeof(destroy_cmd),
                                        (struct ena_admin_acq_entry *)&destroy_resp,
                                        sizeof(destroy_resp));

    if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
        ena_trc_err("failed to destroy io sq error: %d\n", ret);

    return ret;
}
static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
                                  struct ena_com_io_sq *io_sq,
                                  struct ena_com_io_cq *io_cq)
{
    size_t size;

    if (io_cq->cdesc_addr.virt_addr) {
        size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              size,
                              io_cq->cdesc_addr.virt_addr,
                              io_cq->cdesc_addr.phys_addr,
                              io_cq->cdesc_addr.mem_handle);

        io_cq->cdesc_addr.virt_addr = NULL;
    }

    if (io_sq->desc_addr.virt_addr) {
        size = io_sq->desc_entry_size * io_sq->q_depth;

        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              size,
                              io_sq->desc_addr.virt_addr,
                              io_sq->desc_addr.phys_addr,
                              io_sq->desc_addr.mem_handle);

        io_sq->desc_addr.virt_addr = NULL;
    }

    if (io_sq->bounce_buf_ctrl.base_buffer) {
        ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
        io_sq->bounce_buf_ctrl.base_buffer = NULL;
    }
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
                                u16 exp_state)
{
    u32 val, i;

    /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
    timeout = (timeout * 100) / ENA_POLL_MS;

    for (i = 0; i < timeout; i++) {
        val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

        if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
            ena_trc_err("Reg read timeout occurred\n");
            return ENA_COM_TIMER_EXPIRED;
        }

        if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
            exp_state)
            return 0;

        ENA_MSLEEP(ENA_POLL_MS);
    }

    return ENA_COM_TIMER_EXPIRED;
}
static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
                                               enum ena_admin_aq_feature_id feature_id)
{
    u32 feature_mask = 1 << feature_id;

    /* Device attributes are always supported */
    if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
        !(ena_dev->supported_features & feature_mask))
        return false;

    return true;
}
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
                                  struct ena_admin_get_feat_resp *get_resp,
                                  enum ena_admin_aq_feature_id feature_id,
                                  dma_addr_t control_buf_dma_addr,
                                  u32 control_buff_size,
                                  u8 feature_ver)
{
    struct ena_com_admin_queue *admin_queue;
    struct ena_admin_get_feat_cmd get_cmd;
    int ret;

    if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
        ena_trc_dbg("Feature %d isn't supported\n", feature_id);
        return ENA_COM_UNSUPPORTED;
    }

    memset(&get_cmd, 0x0, sizeof(get_cmd));
    admin_queue = &ena_dev->admin_queue;

    get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

    if (control_buff_size)
        get_cmd.aq_common_descriptor.flags =
            ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
    else
        get_cmd.aq_common_descriptor.flags = 0;

    ret = ena_com_mem_addr_set(ena_dev,
                               &get_cmd.control_buffer.address,
                               control_buf_dma_addr);
    if (unlikely(ret)) {
        ena_trc_err("memory address set failed\n");
        return ret;
    }

    get_cmd.control_buffer.length = control_buff_size;
    get_cmd.feat_common.feature_version = feature_ver;
    get_cmd.feat_common.feature_id = feature_id;

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)
                                        &get_cmd,
                                        sizeof(get_cmd),
                                        (struct ena_admin_acq_entry *)
                                        get_resp,
                                        sizeof(*get_resp));

    if (unlikely(ret))
        ena_trc_err("Failed to submit get_feature command %d error: %d\n",
                    feature_id, ret);

    return ret;
}
static int ena_com_get_feature(struct ena_com_dev *ena_dev,
                               struct ena_admin_get_feat_resp *get_resp,
                               enum ena_admin_aq_feature_id feature_id,
                               u8 feature_ver)
{
    return ena_com_get_feature_ex(ena_dev,
                                  get_resp,
                                  feature_id,
                                  0,
                                  0,
                                  feature_ver);
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
    struct ena_rss *rss = &ena_dev->rss;

    ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                           sizeof(*rss->hash_key),
                           rss->hash_key,
                           rss->hash_key_dma_addr,
                           rss->hash_key_mem_handle);

    if (unlikely(!rss->hash_key))
        return ENA_COM_NO_MEM;

    return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
    struct ena_rss *rss = &ena_dev->rss;

    if (rss->hash_key)
        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              sizeof(*rss->hash_key),
                              rss->hash_key,
                              rss->hash_key_dma_addr,
                              rss->hash_key_mem_handle);
    rss->hash_key = NULL;
}
static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
    struct ena_rss *rss = &ena_dev->rss;

    ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                           sizeof(*rss->hash_ctrl),
                           rss->hash_ctrl,
                           rss->hash_ctrl_dma_addr,
                           rss->hash_ctrl_mem_handle);

    if (unlikely(!rss->hash_ctrl))
        return ENA_COM_NO_MEM;

    return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
    struct ena_rss *rss = &ena_dev->rss;

    if (rss->hash_ctrl)
        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              sizeof(*rss->hash_ctrl),
                              rss->hash_ctrl,
                              rss->hash_ctrl_dma_addr,
                              rss->hash_ctrl_mem_handle);
    rss->hash_ctrl = NULL;
}
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
                                           u16 log_size)
{
    struct ena_rss *rss = &ena_dev->rss;
    struct ena_admin_get_feat_resp get_resp;
    size_t tbl_size;
    int ret;

    ret = ena_com_get_feature(ena_dev, &get_resp,
                              ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
    if (unlikely(ret))
        return ret;

    if ((get_resp.u.ind_table.min_size > log_size) ||
        (get_resp.u.ind_table.max_size < log_size)) {
        ena_trc_err("indirect table size doesn't fit. requested size: %d, min: %d, max: %d\n",
                    1 << log_size,
                    1 << get_resp.u.ind_table.min_size,
                    1 << get_resp.u.ind_table.max_size);
        return ENA_COM_INVAL;
    }

    tbl_size = (1ULL << log_size) *
        sizeof(struct ena_admin_rss_ind_table_entry);

    ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                           tbl_size,
                           rss->rss_ind_tbl,
                           rss->rss_ind_tbl_dma_addr,
                           rss->rss_ind_tbl_mem_handle);
    if (unlikely(!rss->rss_ind_tbl))
        goto mem_err1;

    tbl_size = (1ULL << log_size) * sizeof(u16);
    rss->host_rss_ind_tbl =
        ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
    if (unlikely(!rss->host_rss_ind_tbl))
        goto mem_err2;

    rss->tbl_log_size = log_size;

    return 0;

mem_err2:
    tbl_size = (1ULL << log_size) *
        sizeof(struct ena_admin_rss_ind_table_entry);

    ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                          tbl_size,
                          rss->rss_ind_tbl,
                          rss->rss_ind_tbl_dma_addr,
                          rss->rss_ind_tbl_mem_handle);
    rss->rss_ind_tbl = NULL;
mem_err1:
    rss->tbl_log_size = 0;
    return ENA_COM_NO_MEM;
}
static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
    struct ena_rss *rss = &ena_dev->rss;
    size_t tbl_size = (1ULL << rss->tbl_log_size) *
        sizeof(struct ena_admin_rss_ind_table_entry);

    if (rss->rss_ind_tbl)
        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              tbl_size,
                              rss->rss_ind_tbl,
                              rss->rss_ind_tbl_dma_addr,
                              rss->rss_ind_tbl_mem_handle);
    rss->rss_ind_tbl = NULL;

    if (rss->host_rss_ind_tbl)
        ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
    rss->host_rss_ind_tbl = NULL;
}
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
                                struct ena_com_io_sq *io_sq, u16 cq_idx)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    struct ena_admin_aq_create_sq_cmd create_cmd;
    struct ena_admin_acq_create_sq_resp_desc cmd_completion;
    u8 direction;
    int ret;

    memset(&create_cmd, 0x0, sizeof(create_cmd));

    create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

    if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
        direction = ENA_ADMIN_SQ_DIRECTION_TX;
    else
        direction = ENA_ADMIN_SQ_DIRECTION_RX;

    create_cmd.sq_identity |= (direction <<
        ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
        ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

    create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
        ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

    create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
        ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
        ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

    create_cmd.sq_caps_3 |=
        ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

    create_cmd.cq_idx = cq_idx;
    create_cmd.sq_depth = io_sq->q_depth;

    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
        ret = ena_com_mem_addr_set(ena_dev,
                                   &create_cmd.sq_ba,
                                   io_sq->desc_addr.phys_addr);
        if (unlikely(ret)) {
            ena_trc_err("memory address set failed\n");
            return ret;
        }
    }

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)&create_cmd,
                                        sizeof(create_cmd),
                                        (struct ena_admin_acq_entry *)&cmd_completion,
                                        sizeof(cmd_completion));
    if (unlikely(ret)) {
        ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
        return ret;
    }

    io_sq->idx = cmd_completion.sq_idx;

    io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
        (uintptr_t)cmd_completion.sq_doorbell_offset);

    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
        io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
            + cmd_completion.llq_headers_offset);

        io_sq->desc_addr.pbuf_dev_addr =
            (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
            cmd_completion.llq_descriptors_offset);
    }

    ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

    return ret;
}
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
    struct ena_rss *rss = &ena_dev->rss;
    struct ena_com_io_sq *io_sq;
    u16 qid;
    u16 i;

    for (i = 0; i < 1 << rss->tbl_log_size; i++) {
        qid = rss->host_rss_ind_tbl[i];
        if (qid >= ENA_TOTAL_NUM_QUEUES)
            return ENA_COM_INVAL;

        io_sq = &ena_dev->io_sq_queues[qid];

        if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
            return ENA_COM_INVAL;

        rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
    }

    return 0;
}
static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
    u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
    struct ena_rss *rss = &ena_dev->rss;
    u8 idx;
    u16 i;

    for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
        dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

    for (i = 0; i < 1 << rss->tbl_log_size; i++) {
        if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
            return ENA_COM_INVAL;
        idx = (u8)rss->rss_ind_tbl[i].cq_idx;

        if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
            return ENA_COM_INVAL;

        rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
    }

    return 0;
}
static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
    size_t size;

    size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

    ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
    if (!ena_dev->intr_moder_tbl)
        return ENA_COM_NO_MEM;

    ena_com_config_default_interrupt_moderation_table(ena_dev);

    return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
                                                 u16 intr_delay_resolution)
{
    struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
    unsigned int i;

    if (!intr_delay_resolution) {
        ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
        intr_delay_resolution = 1;
    }
    ena_dev->intr_delay_resolution = intr_delay_resolution;

    /* update Rx */
    for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
        intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

    /* update Tx */
    ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}
/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
                                  struct ena_admin_aq_entry *cmd,
                                  size_t cmd_size,
                                  struct ena_admin_acq_entry *comp,
                                  size_t comp_size)
{
    struct ena_comp_ctx *comp_ctx;
    int ret;

    comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
                                        comp, comp_size);
    if (IS_ERR(comp_ctx)) {
        if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
            ena_trc_dbg("Failed to submit command [%ld]\n",
                        PTR_ERR(comp_ctx));
        else
            ena_trc_err("Failed to submit command [%ld]\n",
                        PTR_ERR(comp_ctx));

        return PTR_ERR(comp_ctx);
    }

    ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
    if (unlikely(ret)) {
        if (admin_queue->running_state)
            ena_trc_err("Failed to process command. ret = %d\n",
                        ret);
        else
            ena_trc_dbg("Failed to process command. ret = %d\n",
                        ret);
    }
    return ret;
}
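
/* Usage sketch (illustrative only; it mirrors the get/set-feature callers
 * later in this file): build a specific command on the stack, cast it to the
 * generic AQ/ACQ entry types, and let ena_com_execute_admin_command() submit
 * and wait:
 *
 *	struct ena_admin_get_feat_cmd cmd = {0};
 *	struct ena_admin_get_feat_resp resp;
 *	int ret;
 *
 *	cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
 *	ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
 *					    (struct ena_admin_aq_entry *)&cmd,
 *					    sizeof(cmd),
 *					    (struct ena_admin_acq_entry *)&resp,
 *					    sizeof(resp));
 */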
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
                         struct ena_com_io_cq *io_cq)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    struct ena_admin_aq_create_cq_cmd create_cmd;
    struct ena_admin_acq_create_cq_resp_desc cmd_completion;
    int ret;

    memset(&create_cmd, 0x0, sizeof(create_cmd));

    create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

    create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
        ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
    create_cmd.cq_caps_1 |=
        ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

    create_cmd.msix_vector = io_cq->msix_vector;
    create_cmd.cq_depth = io_cq->q_depth;

    ret = ena_com_mem_addr_set(ena_dev,
                               &create_cmd.cq_ba,
                               io_cq->cdesc_addr.phys_addr);
    if (unlikely(ret)) {
        ena_trc_err("memory address set failed\n");
        return ret;
    }

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)&create_cmd,
                                        sizeof(create_cmd),
                                        (struct ena_admin_acq_entry *)&cmd_completion,
                                        sizeof(cmd_completion));
    if (unlikely(ret)) {
        ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
        return ret;
    }

    io_cq->idx = cmd_completion.cq_idx;

    io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
        cmd_completion.cq_interrupt_unmask_register_offset);

    if (cmd_completion.cq_head_db_register_offset)
        io_cq->cq_head_db_reg =
            (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
            cmd_completion.cq_head_db_register_offset);

    if (cmd_completion.numa_node_register_offset)
        io_cq->numa_node_cfg_reg =
            (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
            cmd_completion.numa_node_register_offset);

    ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

    return ret;
}
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
                            struct ena_com_io_sq **io_sq,
                            struct ena_com_io_cq **io_cq)
{
    if (qid >= ENA_TOTAL_NUM_QUEUES) {
        ena_trc_err("Invalid queue number %d but the max is %d\n",
                    qid, ENA_TOTAL_NUM_QUEUES);
        return ENA_COM_INVAL;
    }

    *io_sq = &ena_dev->io_sq_queues[qid];
    *io_cq = &ena_dev->io_cq_queues[qid];

    return 0;
}
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    struct ena_comp_ctx *comp_ctx;
    u16 i;

    if (!admin_queue->comp_ctx)
        return;

    for (i = 0; i < admin_queue->q_depth; i++) {
        comp_ctx = get_comp_ctxt(admin_queue, i, false);
        if (unlikely(!comp_ctx))
            break;

        comp_ctx->status = ENA_CMD_ABORTED;

        ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
    }
}
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    unsigned long flags = 0;

    ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
    while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
        ENA_MSLEEP(ENA_POLL_MS);
        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
    }
    ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
                          struct ena_com_io_cq *io_cq)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
    struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
    int ret;

    memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

    destroy_cmd.cq_idx = io_cq->idx;
    destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)&destroy_cmd,
                                        sizeof(destroy_cmd),
                                        (struct ena_admin_acq_entry *)&destroy_resp,
                                        sizeof(destroy_resp));

    if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
        ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);

    return ret;
}
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
    return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    unsigned long flags = 0;

    ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
    ena_dev->admin_queue.running_state = state;
    ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
    u16 depth = ena_dev->aenq.q_depth;

    ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

    /* Init head_db to mark that all entries in the queue
     * are initially available
     */
    ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
    struct ena_com_admin_queue *admin_queue;
    struct ena_admin_set_feat_cmd cmd;
    struct ena_admin_set_feat_resp resp;
    struct ena_admin_get_feat_resp get_resp;
    int ret;

    ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
    if (ret) {
        ena_trc_info("Can't get aenq configuration\n");
        return ret;
    }

    if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
        ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
                     get_resp.u.aenq.supported_groups,
                     groups_flag);
        return ENA_COM_UNSUPPORTED;
    }

    memset(&cmd, 0x0, sizeof(cmd));
    admin_queue = &ena_dev->admin_queue;

    cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
    cmd.aq_common_descriptor.flags = 0;
    cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
    cmd.u.aenq.enabled_groups = groups_flag;

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)&cmd,
                                        sizeof(cmd),
                                        (struct ena_admin_acq_entry *)&resp,
                                        sizeof(resp));

    if (unlikely(ret))
        ena_trc_err("Failed to config AENQ ret: %d\n", ret);

    return ret;
}
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
    u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
    int width;

    if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
        ena_trc_err("Reg read timeout occurred\n");
        return ENA_COM_TIMER_EXPIRED;
    }

    width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
        ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

    ena_trc_dbg("ENA dma width: %d\n", width);

    if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
        ena_trc_err("DMA width illegal value: %d\n", width);
        return ENA_COM_INVAL;
    }

    ena_dev->dma_addr_bits = width;

    return width;
}
int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
    u32 ver;
    u32 ctrl_ver;
    u32 ctrl_ver_masked;

    /* Make sure the ENA version and the controller version are at least
     * the versions the driver expects
     */
    ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
    ctrl_ver = ena_com_reg_bar_read32(ena_dev,
                                      ENA_REGS_CONTROLLER_VERSION_OFF);

    if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
                 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
        ena_trc_err("Reg read timeout occurred\n");
        return ENA_COM_TIMER_EXPIRED;
    }

    ena_trc_info("ena device version: %d.%d\n",
                 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
                 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
                 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

    ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
                 >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
                 >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
                 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

    ctrl_ver_masked =
        (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
        (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
        (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

    /* Validate the ctrl version without the implementation ID */
    if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
        ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
        return -1;
    }

    return 0;
}
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    struct ena_com_admin_cq *cq = &admin_queue->cq;
    struct ena_com_admin_sq *sq = &admin_queue->sq;
    struct ena_com_aenq *aenq = &ena_dev->aenq;
    u16 size;

    /* Check comp_ctx before dereferencing it; destroying the wait event of a
     * NULL context would be a NULL-pointer dereference.
     */
    if (admin_queue->comp_ctx) {
        ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
        ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
    }
    admin_queue->comp_ctx = NULL;
    size = ADMIN_SQ_SIZE(admin_queue->q_depth);
    if (sq->entries)
        ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
                              sq->dma_addr, sq->mem_handle);
    sq->entries = NULL;

    size = ADMIN_CQ_SIZE(admin_queue->q_depth);
    if (cq->entries)
        ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
                              cq->dma_addr, cq->mem_handle);
    cq->entries = NULL;

    size = ADMIN_AENQ_SIZE(aenq->q_depth);
    if (ena_dev->aenq.entries)
        ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
                              aenq->dma_addr, aenq->mem_handle);
    aenq->entries = NULL;
    ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
    u32 mask_value = 0;

    if (polling)
        mask_value = ENA_REGS_ADMIN_INTR_MASK;

    ENA_REG_WRITE32(ena_dev->bus, mask_value,
                    ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
    ena_dev->admin_queue.polling = polling;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
    struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

    ENA_SPINLOCK_INIT(mmio_read->lock);
    ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                           sizeof(*mmio_read->read_resp),
                           mmio_read->read_resp,
                           mmio_read->read_resp_dma_addr,
                           mmio_read->read_resp_mem_handle);
    if (unlikely(!mmio_read->read_resp))
        goto err;

    ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

    mmio_read->read_resp->req_id = 0x0;
    mmio_read->seq_num = 0x0;
    mmio_read->readless_supported = true;

    return 0;

err:
    ENA_SPINLOCK_DESTROY(mmio_read->lock);
    return ENA_COM_NO_MEM;
}
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
    struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

    mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
    struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

    ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
    ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

    ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                          sizeof(*mmio_read->read_resp),
                          mmio_read->read_resp,
                          mmio_read->read_resp_dma_addr,
                          mmio_read->read_resp_mem_handle);

    mmio_read->read_resp = NULL;
    ENA_SPINLOCK_DESTROY(mmio_read->lock);
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
    struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
    u32 addr_low, addr_high;

    addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
    addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

    ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
    ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
                       struct ena_aenq_handlers *aenq_handlers)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
    int ret;

    dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

    if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
        ena_trc_err("Reg read timeout occurred\n");
        return ENA_COM_TIMER_EXPIRED;
    }

    if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
        ena_trc_err("Device isn't ready, abort com init\n");
        return ENA_COM_NO_DEVICE;
    }

    admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

    admin_queue->bus = ena_dev->bus;
    admin_queue->q_dmadev = ena_dev->dmadev;
    admin_queue->polling = false;
    admin_queue->curr_cmd_id = 0;

    ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

    ENA_SPINLOCK_INIT(admin_queue->q_lock);

    ret = ena_com_init_comp_ctxt(admin_queue);
    if (ret)
        goto error;

    ret = ena_com_admin_init_sq(admin_queue);
    if (ret)
        goto error;

    ret = ena_com_admin_init_cq(admin_queue);
    if (ret)
        goto error;

    admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
        ENA_REGS_AQ_DB_OFF);

    addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
    addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

    ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
    ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

    addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
    addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

    ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
    ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

    aq_caps = 0;
    aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
    aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
        ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
        ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

    acq_caps = 0;
    acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
    acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
        ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
        ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

    ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
    ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
    ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
    if (ret)
        goto error;

    admin_queue->running_state = true;

    return 0;
error:
    ena_com_admin_destroy(ena_dev);

    return ret;
}
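
/* Typical bring-up order implied by this file (a sketch, not a contract
 * enforced here): ena_com_mmio_reg_read_request_init(), then
 * ena_com_validate_version() and ena_com_get_dma_width(), then
 * ena_com_admin_init() with the AENQ handlers table, then feature discovery
 * via ena_com_get_dev_attr_feat(), and only afterwards I/O queue creation
 * with ena_com_create_io_queue().
 */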
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
                            struct ena_com_create_io_ctx *ctx)
{
    struct ena_com_io_sq *io_sq;
    struct ena_com_io_cq *io_cq;
    int ret;

    if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
        ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
                    ctx->qid, ENA_TOTAL_NUM_QUEUES);
        return ENA_COM_INVAL;
    }

    io_sq = &ena_dev->io_sq_queues[ctx->qid];
    io_cq = &ena_dev->io_cq_queues[ctx->qid];

    memset(io_sq, 0x0, sizeof(*io_sq));
    memset(io_cq, 0x0, sizeof(*io_cq));

    io_cq->q_depth = ctx->queue_size;
    io_cq->direction = ctx->direction;
    io_cq->qid = ctx->qid;

    io_cq->msix_vector = ctx->msix_vector;

    io_sq->q_depth = ctx->queue_size;
    io_sq->direction = ctx->direction;
    io_sq->qid = ctx->qid;

    io_sq->mem_queue_type = ctx->mem_queue_type;

    if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
        /* header length is limited to 8 bits */
        io_sq->tx_max_header_size =
            ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

    ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
    if (ret)
        goto error;
    ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
    if (ret)
        goto error;

    ret = ena_com_create_io_cq(ena_dev, io_cq);
    if (ret)
        goto error;

    ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
    if (ret)
        goto destroy_io_cq;

    return 0;

destroy_io_cq:
    ena_com_destroy_io_cq(ena_dev, io_cq);
error:
    ena_com_io_queue_free(ena_dev, io_sq, io_cq);
    return ret;
}
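
/* Error handling pattern above: the SQ is created last, so a failed SQ
 * creation only needs to unwind the already-created CQ (destroy_io_cq label)
 * before both queues' host memory is released via ena_com_io_queue_free().
 */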
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
    struct ena_com_io_sq *io_sq;
    struct ena_com_io_cq *io_cq;

    if (qid >= ENA_TOTAL_NUM_QUEUES) {
        ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
                    qid, ENA_TOTAL_NUM_QUEUES);
        return;
    }

    io_sq = &ena_dev->io_sq_queues[qid];
    io_cq = &ena_dev->io_cq_queues[qid];

    ena_com_destroy_io_sq(ena_dev, io_sq);
    ena_com_destroy_io_cq(ena_dev, io_cq);

    ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
                            struct ena_admin_get_feat_resp *resp)
{
    return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}
int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
{
    struct ena_admin_get_feat_resp resp;
    struct ena_extra_properties_strings *extra_properties_strings =
        &ena_dev->extra_properties_strings;
    int rc;

    extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
        ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;

    ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                           extra_properties_strings->size,
                           extra_properties_strings->virt_addr,
                           extra_properties_strings->dma_addr,
                           extra_properties_strings->dma_handle);
    if (unlikely(!extra_properties_strings->virt_addr)) {
        ena_trc_err("Failed to allocate extra properties strings\n");
        return 0;
    }

    rc = ena_com_get_feature_ex(ena_dev, &resp,
                                ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
                                extra_properties_strings->dma_addr,
                                extra_properties_strings->size, 0);
    if (rc) {
        ena_trc_dbg("Failed to get extra properties strings\n");
        goto err;
    }

    return resp.u.extra_properties_strings.count;
err:
    ena_com_delete_extra_properties_strings(ena_dev);
    return 0;
}
void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
{
    struct ena_extra_properties_strings *extra_properties_strings =
        &ena_dev->extra_properties_strings;

    if (extra_properties_strings->virt_addr) {
        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              extra_properties_strings->size,
                              extra_properties_strings->virt_addr,
                              extra_properties_strings->dma_addr,
                              extra_properties_strings->dma_handle);
        extra_properties_strings->virt_addr = NULL;
    }
}

int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
                                       struct ena_admin_get_feat_resp *resp)
{
    return ena_com_get_feature(ena_dev, resp,
                               ENA_ADMIN_EXTRA_PROPERTIES_FLAGS, 0);
}
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
                              struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
    struct ena_admin_get_feat_resp get_resp;
    int rc;

    rc = ena_com_get_feature(ena_dev, &get_resp,
                             ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
    if (rc)
        return rc;

    memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
           sizeof(get_resp.u.dev_attr));
    ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

    if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_MAX_QUEUES_EXT,
                                 ENA_FEATURE_MAX_QUEUE_EXT_VER);
        if (rc)
            return rc;

        if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
            return ENA_COM_INVAL;

        memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
               sizeof(get_resp.u.max_queue_ext));
        ena_dev->tx_max_header_size =
            get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
    } else {
        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_MAX_QUEUES_NUM, 0);
        memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
               sizeof(get_resp.u.max_queue));
        ena_dev->tx_max_header_size =
            get_resp.u.max_queue.max_header_size;

        if (rc)
            return rc;
    }

    rc = ena_com_get_feature(ena_dev, &get_resp,
                             ENA_ADMIN_AENQ_CONFIG, 0);
    if (rc)
        return rc;

    memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
           sizeof(get_resp.u.aenq));

    rc = ena_com_get_feature(ena_dev, &get_resp,
                             ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
    if (rc)
        return rc;

    memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
           sizeof(get_resp.u.offload));

    /* Driver hints isn't a mandatory admin command, so if the
     * command isn't supported, set driver hints to 0
     */
    rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);

    if (!rc)
        memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
               sizeof(get_resp.u.hw_hints));
    else if (rc == ENA_COM_UNSUPPORTED)
        memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
    else
        return rc;

    rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
    if (!rc)
        memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
               sizeof(get_resp.u.llq));
    else if (rc == ENA_COM_UNSUPPORTED)
        memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
    else
        return rc;

    rc = ena_com_get_feature(ena_dev, &get_resp,
                             ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
    if (!rc)
        memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
               sizeof(get_resp.u.ind_table));
    else if (rc == ENA_COM_UNSUPPORTED)
        memset(&get_feat_ctx->ind_table, 0x0,
               sizeof(get_feat_ctx->ind_table));
    else
        return rc;

    return 0;
}
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_com_aenq_intr_handler:
 * handles incoming AENQ events.
 * pops events from the queue and applies the matching handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	unsigned long long timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE8(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp = (unsigned long long)aenq_common->timestamp_low |
			((unsigned long long)aenq_common->timestamp_high << 32);
		ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			    aenq_common->group,
			    aenq_common->syndrom,
			    timestamp);

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
				dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

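/* Illustrative sketch (not part of this file): the handler table consumed by
 * ena_com_aenq_intr_handler() is supplied by the driver when the admin queue
 * is initialized. The callback and table names below are hypothetical; the
 * struct layout and ENA_ADMIN_LINK_CHANGE group come from the ena_com headers.
 */
#if 0
static void example_link_change_cb(void *adapter_data,
				   struct ena_admin_aenq_entry *aenq_e)
{
	/* React to ENA_ADMIN_LINK_CHANGE events here */
}

static void example_unimplemented_cb(void *adapter_data,
				     struct ena_admin_aenq_entry *aenq_e)
{
	/* Fallback for event groups without a registered handler */
}

static struct ena_aenq_handlers example_aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = example_link_change_cb,
	},
	.unimplemented_handler = example_unimplemented_cb,
};
#endif
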
#ifdef ENA_EXTENDED_STATS
/* ena_com_extended_stats_set_func_queue:
 * Sets the function idx and queue idx to be used by the
 * get full statistics feature
 */
int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
					  u32 func_queue)
{
	/* Function & Queue is acquired from user in the following format:
	 * Bottom half word: function
	 * Top half word: queue
	 */
	ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
	ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);

	return 0;
}

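/* Illustrative sketch (not part of this file): the 32-bit func_queue argument
 * packs the function index into the bottom half word and the queue index into
 * the top half word, matching ENA_EXTENDED_STAT_GET_FUNCT()/_GET_QUEUE().
 * The concrete values below are arbitrary examples.
 */
#if 0
	u32 func_queue = (5 << 16) | 2;	/* queue 5, function 2 */

	ena_com_extended_stats_set_func_queue(ena_dev, func_queue);
#endif
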
#endif /* ENA_EXTENDED_STATS */

int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err("Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err("Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

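/* Illustrative sketch (not part of this file): a typical recovery path resets
 * the device with a reason code and then re-creates the admin and I/O queues;
 * the surrounding re-init steps are driver specific and only hinted at here.
 * ENA_REGS_RESET_NORMAL is one of the enum ena_regs_reset_reason_types values.
 */
#if 0
	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (rc)
		return rc;	/* device never acknowledged the reset */
	/* ... re-create admin queue, AENQ and I/O queues ... */
#endif
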
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

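/* Illustrative sketch (not part of this file): basic stats are reported as
 * 32-bit low/high pairs, so a caller typically stitches them into 64-bit
 * counters as shown. The local variable names are hypothetical.
 */
#if 0
	struct ena_admin_basic_stats stats;
	u64 rx_packets;

	if (ena_com_get_dev_basic_stats(ena_dev, &stats) == 0)
		rx_packets = ((u64)stats.rx_pkts_high << 32) |
			     stats.rx_pkts_low;
#endif
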
#ifdef ENA_EXTENDED_STATS

int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
				   u32 len)
{
	struct ena_com_stats_ctx ctx;
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
	ena_mem_handle_t mem_handle;
	void *virt_addr;
	dma_addr_t phys_addr;
	int ret;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
			       virt_addr, phys_addr, mem_handle);
	if (!virt_addr) {
		ret = ENA_COM_NO_MEM;
		goto done;
	}
	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd->u.control_buffer.address,
				   phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		goto free_ext_stats_mem;
	}
	get_cmd->u.control_buffer.length = len;

	get_cmd->device_id = ena_dev->stats_func;
	get_cmd->queue_idx = ena_dev->stats_queue;

	ret = ena_get_dev_stats(ena_dev, &ctx,
				ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
	if (ret < 0)
		goto free_ext_stats_mem;

	ret = snprintf(buff, len, "%s", (char *)virt_addr);

free_ext_stats_mem:
	ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
			      mem_handle);
done:
	return ret;
}
#endif /* ENA_EXTENDED_STATS */

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

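/* Illustrative sketch (not part of this file): MTU updates go through the
 * admin queue, so a driver pushes the new value to the device alongside its
 * own netdev state update. The 9001 value is just an example jumbo MTU.
 */
#if 0
	rc = ena_com_set_dev_mtu(ena_dev, 9001);
	if (rc == ENA_COM_UNSUPPORTED)
		rc = 0;	/* feature not advertised; device keeps current MTU */
#endif
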
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (unlikely(ret)) {
		ena_trc_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_UNSUPPORTED;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		ena_trc_err("Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a multiple of DWs */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err("Flow hash function %d isn't supported\n", func);
		return ENA_COM_UNSUPPORTED;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
				    key_len, sizeof(hash_key->key));
			return ENA_COM_INVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		ena_trc_err("Invalid hash function (%d)\n", func);
		return ENA_COM_INVAL;
	}

	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

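/* Illustrative sketch (not part of this file): programming a Toeplitz key.
 * key_len must be a multiple of 4 (the DW check above); the key bytes and the
 * zero init value here are arbitrary placeholders, and 40 bytes matches the
 * conventional Toeplitz key length.
 */
#if 0
	static const u8 example_key[40] = { 0x6d, 0x5a, /* ... */ };

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
					example_key, sizeof(example_key),
					0x0);
	if (rc == ENA_COM_UNSUPPORTED)
		rc = 0;	/* fall back to the device default hash */
#endif
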
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl), 0);
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_UNSUPPORTED;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err("Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

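/* Illustrative sketch (not part of this file): drivers typically fill the
 * table round-robin over the active queues and then push it to the device
 * with ena_com_indirect_table_set(). tbl_size (1 << the log size passed to
 * ena_com_rss_init()) and num_queues are hypothetical caller-side locals.
 */
#if 0
	for (i = 0; i < tbl_size; i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       i % num_queues);
		if (rc)
			return rc;
	}

	rc = ena_com_indirect_table_set(ena_dev);
#endif
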
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return ENA_COM_UNSUPPORTED;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		ena_trc_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

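/* Illustrative sketch (not part of this file): the RSS lifecycle as the
 * functions above compose it. ENA_RX_RSS_TABLE_LOG_SIZE is a driver-side
 * constant here (the reference drivers use 7, i.e. a 128-entry table).
 */
#if 0
	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (rc)
		return rc;

	/* ... fill the indirection table, hash key and hash ctrl ... */

	ena_com_rss_destroy(ena_dev);	/* on teardown */
#endif
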
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       SZ_4K,
			       host_attr->host_info,
			       host_attr->host_info_dma_addr,
			       host_attr->host_info_dma_handle);
	if (unlikely(!host_attr->host_info))
		return ENA_COM_NO_MEM;

	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		(ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       debug_area_size,
			       host_attr->debug_area_virt_addr,
			       host_attr->debug_area_dma_addr,
			       host_attr->debug_area_dma_handle);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return ENA_COM_NO_MEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      SZ_4K,
				      host_attr->host_info,
				      host_attr->host_info_dma_addr,
				      host_attr->host_info_dma_handle);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      host_attr->debug_area_size,
				      host_attr->debug_area_virt_addr,
				      host_attr->debug_area_dma_addr,
				      host_attr->debug_area_dma_handle);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

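/* Illustrative sketch (not part of this file): because the feature check is
 * skipped (see the comment above), drivers allocate the host info and debug
 * area first and tolerate ENA_COM_UNSUPPORTED from older devices.
 */
#if 0
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc)
		return rc;

	/* fill ena_dev->host_attr.host_info (OS type, driver version, ...) */

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc == ENA_COM_UNSUPPORTED)
		rc = 0;	/* older device: host info simply isn't consumed */
#endif
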
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	/* We use the LOWEST entry of the moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}

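/* Illustrative sketch (not part of this file): the conversion the two setters
 * above perform. If the device reports an intr_delay_resolution of 32us, a
 * request for 96us of TX coalescing is stored as 96 / 32 = 3 device units.
 * The values are arbitrary examples.
 */
#if 0
	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, 96);
#endif
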
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);

	if (rc) {
		if (rc == ENA_COM_UNSUPPORTED) {
			ena_trc_dbg("Feature %d isn't supported\n",
				    ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
				    rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}

void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}

int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	int rc;
	int size;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	/* Validate the descriptor is not too big */
	size = ena_dev->tx_max_header_size;
	size += ena_dev->llq_info.descs_num_before_header *
		sizeof(struct ena_eth_io_tx_desc);

	if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
		ena_trc_err("the size of the LLQ entry is smaller than needed\n");
		return ENA_COM_INVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;