/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#ifdef ENA_EXTENDED_STATS

#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)

#endif /* ENA_EXTENDED_STATS */

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

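/* Worked example (illustrative, not part of the upstream source): with the
 * defaults above (0.0.1), MIN_ENA_CTRL_VER evaluates to
 *	(0 << MAJOR_SHIFT) | (0 << MINOR_SHIFT) | 1
 * i.e. sub-minor 1 in the low bits, using the SHIFT constants from
 * ena_regs_defs.h. ena_com_validate_version() below masks the implementation
 * ID out of the controller-version register and requires the remainder to be
 * at least this packed value.
 */
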
#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

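/* Example (illustrative, not from the upstream source): splitting the DMA
 * address 0x1122334455667788 with the macros above yields
 * low = 0x55667788 and high = 0x11223344; the two halves are written to the
 * *_LO/*_HI register pairs used throughout this file.
 */
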
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_MIN_ADMIN_POLL_US 100

#define ENA_MAX_ADMIN_POLL_US 5000

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	ena_wait_event_t wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};

static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				struct ena_common_mem_addr *ena_addr,
				dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err(ena_dev, "DMA address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr,
			       sq->mem_handle);

	if (unlikely(!sq->entries)) {
		ena_trc_err(ena_dev, "Memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, cq->entries, cq->dma_addr,
			       cq->mem_handle);

	if (unlikely(!cq->entries)) {
		ena_trc_err(ena_dev, "Memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size,
			aenq->entries,
			aenq->dma_addr,
			aenq->mem_handle);

	if (unlikely(!aenq->entries)) {
		ena_trc_err(ena_dev, "Memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
		      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	ENA_REG_WRITE32(ena_dev->bus, aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		ena_trc_err(ena_dev, "AENQ handlers pointer is NULL\n");
		return ENA_COM_INVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static void comp_ctxt_release(struct ena_com_admin_queue *queue,
			      struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	ATOMIC32_DEC(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= admin_queue->q_depth)) {
		ena_trc_err(admin_queue->ena_dev,
			    "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
			    command_id, admin_queue->q_depth);
		return NULL;
	}

	if (unlikely(!admin_queue->comp_ctx)) {
		ena_trc_err(admin_queue->ena_dev,
			    "Completion context is NULL\n");
		return NULL;
	}

	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
		ena_trc_err(admin_queue->ena_dev,
			    "Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		ATOMIC32_INC(&admin_queue->outstanding_cmds);
		admin_queue->comp_ctx[command_id].occupied = true;
	}

	return &admin_queue->comp_ctx[command_id];
}

static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(ENA_COM_NO_SPACE);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(ENA_COM_INVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	ENA_DB_SYNC(&admin_queue->sq.mem_handle);
	ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
			admin_queue->sq.db_addr);

	return comp_ctx;
}

static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	admin_queue->comp_ctx = ENA_MEM_ALLOC(admin_queue->q_dmadev, size);
	if (unlikely(!admin_queue->comp_ctx)) {
		ena_trc_err(ena_dev, "Memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (comp_ctx)
			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		return ERR_PTR(ENA_COM_NO_DEVICE);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

	return comp_ctx;
}

static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;
	io_sq->bus = ena_dev->bus;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
					    size,
					    io_sq->desc_addr.virt_addr,
					    io_sq->desc_addr.phys_addr,
					    io_sq->desc_addr.mem_handle,
					    ctx->numa_node,
					    dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
					       size,
					       io_sq->desc_addr.virt_addr,
					       io_sq->desc_addr.phys_addr,
					       io_sq->desc_addr.mem_handle);
		}

		if (!io_sq->desc_addr.virt_addr) {
			ena_trc_err(ena_dev, "Memory allocation failed\n");
			return ENA_COM_NO_MEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers */
		io_sq->bounce_buf_ctrl.buffer_size =
			ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num =
			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size *
			io_sq->bounce_buf_ctrl.buffers_num;

		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
				   size,
				   io_sq->bounce_buf_ctrl.base_buffer,
				   ctx->numa_node,
				   dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n");
			return ENA_COM_NO_MEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
		       sizeof(io_sq->llq_info));

		/* Initiate the first bounce buffer */
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;
		io_sq->disable_meta_caching =
			io_sq->llq_info.disable_meta_caching;

		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
			io_sq->entries_in_tx_burst_left =
				io_sq->llq_info.max_entries_in_tx_burst;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
	io_cq->bus = ena_dev->bus;

	ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
					    size,
					    io_cq->cdesc_addr.virt_addr,
					    io_cq->cdesc_addr.phys_addr,
					    io_cq->cdesc_addr.mem_handle,
					    ctx->numa_node,
					    prev_node,
					    ENA_CDESC_RING_SIZE_ALIGNMENT);
	if (!io_cq->cdesc_addr.virt_addr) {
		ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
					       size,
					       io_cq->cdesc_addr.virt_addr,
					       io_cq->cdesc_addr.phys_addr,
					       io_cq->cdesc_addr.mem_handle,
					       ENA_CDESC_RING_SIZE_ALIGNMENT);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		ena_trc_err(ena_dev, "Memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		ena_trc_err(admin_queue->ena_dev,
			    "comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

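/* Note on the phase-bit protocol used above (explanatory, not from the
 * upstream source): the driver owns a completion entry only while the
 * entry's phase bit matches admin_queue->cq.phase. The device flips the bit
 * it writes on every lap of the ring, and the loop above flips the expected
 * phase whenever head_masked wraps to 0, so a stale entry left over from the
 * previous lap can never be mistaken for a new completion.
 */
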
static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
					u8 comp_status)
{
	if (unlikely(comp_status != 0))
		ena_trc_err(admin_queue->ena_dev,
			    "Admin command failed[%u]\n", comp_status);

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return ENA_COM_OK;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return ENA_COM_NO_MEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return ENA_COM_UNSUPPORTED;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return ENA_COM_INVAL;
	case ENA_ADMIN_RESOURCE_BUSY:
		return ENA_COM_TRY_AGAIN;
	}

	return ENA_COM_INVAL;
}

static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
{
	delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
	delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
	ENA_USLEEP(delay_us);
}

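/* Worked example (illustrative, not from the upstream source): with
 * ena_min_poll_delay_us = 100, the polling loops below sleep 100us, 200us,
 * 400us, ... for exp = 0, 1, 2, ..., clamped to the
 * [ENA_MIN_ADMIN_POLL_US, ENA_MAX_ADMIN_POLL_US] range, i.e. never more
 * than 5ms per iteration.
 */
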
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	ena_time_t timeout;
	int ret;
	u32 exp = 0;

	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);

	while (1) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (ENA_TIME_EXPIRE(timeout)) {
			ena_trc_err(admin_queue->ena_dev,
				    "Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}

		ena_delay_exponential_backoff_us(exp++,
						 admin_queue->ena_dev->ena_min_poll_delay_us);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ret = ENA_COM_NO_DEVICE;
		goto err;
	}

	ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
		 admin_queue->ena_dev, "Invalid comp status %d\n",
		 comp_ctx->status);

	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/*
 * Set the LLQ configurations of the firmware
 *
 * The driver provides only the enabled feature values to the device,
 * which in turn, checks if they are supported.
 */
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int ret;

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

	cmd.u.llq.accel_mode.u.set.enabled_flags =
		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
		BIT(ENA_ADMIN_LIMIT_TX_BURST);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to set LLQ configurations: %d\n", ret);

	return ret;
}

static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_features,
				   struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	struct ena_admin_accel_mode_get llq_accel_mode_get;
	u16 supported_feat;
	int rc;

	memset(llq_info, 0, sizeof(*llq_info));

	supported_feat = llq_features->header_location_ctrl_supported;

	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
		llq_info->header_location_ctrl =
			llq_default_cfg->llq_header_location;
	} else {
		ena_trc_err(ena_dev, "Invalid header location control, supported: 0x%x\n",
			    supported_feat);
		return ENA_COM_INVAL;
	}

	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
		supported_feat = llq_features->descriptors_stride_ctrl_supported;
		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
		} else {
			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
			} else {
				ena_trc_err(ena_dev, "Invalid desc_stride_ctrl, supported: 0x%x\n",
					    supported_feat);
				return ENA_COM_INVAL;
			}

			ena_trc_err(ena_dev, "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
				    llq_default_cfg->llq_stride_ctrl,
				    supported_feat,
				    llq_info->desc_stride_ctrl);
		}
	} else {
		llq_info->desc_stride_ctrl = 0;
	}

	supported_feat = llq_features->entry_size_ctrl_supported;
	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
	} else {
		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
			llq_info->desc_list_entry_size = 128;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			ena_trc_err(ena_dev, "Invalid entry_size_ctrl, supported: 0x%x\n",
				    supported_feat);
			return ENA_COM_INVAL;
		}

		ena_trc_err(ena_dev, "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			    llq_default_cfg->llq_ring_entry_size,
			    supported_feat,
			    llq_info->desc_list_entry_size);
	}

	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8.
		 * This requirement comes from __iowrite64_copy()
		 */
		ena_trc_err(ena_dev, "Illegal entry size %d\n",
			    llq_info->desc_list_entry_size);
		return ENA_COM_INVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			ena_trc_err(ena_dev, "Invalid descs_num_before_header, supported: 0x%x\n",
				    supported_feat);
			return ENA_COM_INVAL;
		}

		ena_trc_err(ena_dev, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			    llq_default_cfg->llq_num_decs_before_header,
			    supported_feat,
			    llq_info->descs_num_before_header);
	}

	/* Check for accelerated queue supported */
	llq_accel_mode_get = llq_features->accel_mode.u.get;

	llq_info->disable_meta_caching =
		!!(llq_accel_mode_get.supported_flags &
		   BIT(ENA_ADMIN_DISABLE_META_CACHING));

	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
		llq_info->max_entries_in_tx_burst =
			llq_accel_mode_get.max_tx_burst_size /
			llq_default_cfg->llq_ring_entry_size_value;

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		ena_trc_err(ena_dev, "Cannot set LLQ configuration: %d\n", rc);

	return rc;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    admin_queue->completion_timeout);

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the driver didn't get any MSI-X interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED) {
			ena_trc_err(admin_queue->ena_dev,
				    "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
				    comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
			/* Check if fallback to polling is enabled */
			if (admin_queue->auto_polling)
				admin_queue->polling = true;
		} else {
			ena_trc_err(admin_queue->ena_dev,
				    "The ena device didn't send a completion for the admin cmd %d status %d\n",
				    comp_ctx->cmd_opcode, comp_ctx->status);
		}
		/* Check if shifted to polling mode.
		 * This will happen if there is a completion without an interrupt
		 * and autopolling mode is enabled. Continuing normal execution in such case.
		 */
		if (!admin_queue->polling) {
			admin_queue->running_state = false;
			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}
	}

	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads the hardware device register through posting writes
 * and waiting for response
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	ENA_MIGHT_SLEEP();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
			ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
			break;

		ENA_UDELAY(1);
	}

	if (unlikely(i == timeout)) {
		ena_trc_err(ena_dev, "Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
			    mmio_read->seq_num,
			    offset,
			    read_resp->req_id,
			    read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		ena_trc_err(ena_dev, "Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

	return ret;
}

/* There are two types to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err(ena_dev, "Failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_sq->desc_addr.virt_addr,
				      io_sq->desc_addr.phys_addr,
				      io_sq->desc_addr.mem_handle);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		ENA_MEM_FREE(ena_dev->dmadev,
			     io_sq->bounce_buf_ctrl.base_buffer,
			     (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, exp = 0;
	ena_time_t timeout_stamp;

	/* Convert timeout from resolution of 100ms to us resolution. */
	timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout);

	while (1) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			ena_trc_err(ena_dev, "Reg read timeout occurred\n");
			return ENA_COM_TIMER_EXPIRED;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		if (ENA_TIME_EXPIRE(timeout_stamp))
			return ENA_COM_TIMER_EXPIRED;

		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
	}
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes is always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size,
				  u8 feature_ver)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", feature_id);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;
	get_cmd.feat_common.feature_version = feature_ver;
	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to submit get_feature command %d error: %d\n",
			    feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id,
			       u8 feature_ver)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0,
				      feature_ver);
}

int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
{
	return ena_dev->rss.hash_func;
}

static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		(ena_dev->rss).hash_key;

	ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
	/* The key buffer is stored in the device in an array of
	 * uint32 elements.
	 */
	hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
		return ENA_COM_UNSUPPORTED;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_key),
			       rss->hash_key,
			       rss->hash_key_dma_addr,
			       rss->hash_key_mem_handle);

	if (unlikely(!rss->hash_key))
		return ENA_COM_NO_MEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_key),
				      rss->hash_key,
				      rss->hash_key_dma_addr,
				      rss->hash_key_mem_handle);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_ctrl),
			       rss->hash_ctrl,
			       rss->hash_ctrl_dma_addr,
			       rss->hash_ctrl_mem_handle);

	if (unlikely(!rss->hash_ctrl))
		return ENA_COM_NO_MEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_ctrl),
				      rss->hash_ctrl,
				      rss->hash_ctrl_dma_addr,
				      rss->hash_ctrl_mem_handle);
	rss->hash_ctrl = NULL;
}

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		ena_trc_err(ena_dev, "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
			    1 << log_size,
			    1 << get_resp.u.ind_table.min_size,
			    1 << get_resp.u.ind_table.max_size);
		return ENA_COM_INVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       tbl_size,
			       rss->rss_ind_tbl,
			       rss->rss_ind_tbl_dma_addr,
			       rss->rss_ind_tbl_mem_handle);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      tbl_size,
			      rss->rss_ind_tbl,
			      rss->rss_ind_tbl_dma_addr,
			      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return ENA_COM_NO_MEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      tbl_size,
				      rss->rss_ind_tbl,
				      rss->rss_ind_tbl_dma_addr,
				      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		ENA_MEM_FREE(ena_dev->dmadev,
			     rss->host_rss_ind_tbl,
			     ((1ULL << rss->tbl_log_size) * sizeof(u16)));
	rss->host_rss_ind_tbl = NULL;
}

static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			ena_trc_err(ena_dev, "Memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return ENA_COM_INVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;

	if (unlikely(!intr_delay_resolution)) {
		ena_trc_err(ena_dev, "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	}

	/* update Rx */
	ena_dev->intr_moder_rx_interval =
		ena_dev->intr_moder_rx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval =
		ena_dev->intr_moder_tx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	ena_dev->intr_delay_resolution = intr_delay_resolution;
}

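/* Worked example (illustrative, not from the upstream source): the moderation
 * intervals are stored in units of the current resolution. If the previous
 * resolution was 1us and the device reports 4us, a stored Rx interval of
 * 64 units (64us) is rescaled to 64 * 1 / 4 = 16 units, which still
 * represents 64us under the new resolution.
 */
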
/*****************************************************************************/
/*******************************      API       *****************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
			ena_trc_dbg(admin_queue->ena_dev,
				    "Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));
		else
			ena_trc_err(admin_queue->ena_dev,
				    "Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));

		return (int)PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			ena_trc_err(admin_queue->ena_dev,
				    "Failed to process command. ret = %d\n", ret);
		else
			ena_trc_dbg(admin_queue->ena_dev,
				    "Failed to process command. ret = %d\n", ret);
	}
	return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	ena_trc_dbg(ena_dev, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err(ena_dev, "Invalid queue number %d but the max is %d\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;
	u32 exp = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	}
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err(ena_dev, "Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	ENA_WARN(ena_dev->aenq.head != depth, ena_dev, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		ena_trc_info(ena_dev, "Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		ena_trc_warn(ena_dev, "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
			     get_resp.u.aenq.supported_groups,
			     groups_flag);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	ena_trc_dbg(ena_dev, "ENA dma width: %d\n", width);

	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
		ena_trc_err(ena_dev, "DMA width illegal value: %d\n", width);
		return ENA_COM_INVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	ena_trc_info(ena_dev, "ENA device version: %d.%d\n",
		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	ena_trc_info(ena_dev, "ENA controller version: %d.%d.%d implementation version %d\n",
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		ena_trc_err(ena_dev, "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

static void
ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
				      struct ena_com_admin_queue *admin_queue)
{
	if (!admin_queue->comp_ctx)
		return;

	ENA_WAIT_EVENTS_DESTROY(admin_queue);
	ENA_MEM_FREE(ena_dev->dmadev,
		     admin_queue->comp_ctx,
		     (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));

	admin_queue->comp_ctx = NULL;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);

	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
				      sq->dma_addr, sq->mem_handle);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
				      cq->dma_addr, cq->mem_handle);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
				      aenq->dma_addr, aenq->mem_handle);
	aenq->entries = NULL;
	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mask_value,
			ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.polling;
}

void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
					 bool polling)
{
	ena_dev->admin_queue.auto_polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_SPINLOCK_INIT(mmio_read->lock);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*mmio_read->read_resp),
			       mmio_read->read_resp,
			       mmio_read->read_resp_dma_addr,
			       mmio_read->read_resp_mem_handle);
	if (unlikely(!mmio_read->read_resp))
		goto err;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;

err:
	ENA_SPINLOCK_DESTROY(mmio_read->lock);
	return ENA_COM_NO_MEM;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      sizeof(*mmio_read->read_resp),
			      mmio_read->read_resp,
			      mmio_read->read_resp_dma_addr,
			      mmio_read->read_resp_mem_handle);

	mmio_read->read_resp = NULL;
	ENA_SPINLOCK_DESTROY(mmio_read->lock);
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		ena_trc_err(ena_dev, "Device isn't ready, abort com init\n");
		return ENA_COM_NO_DEVICE;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->bus = ena_dev->bus;
	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

	ENA_SPINLOCK_INIT(admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->ena_dev = ena_dev;
	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

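/* Initialization-order sketch (illustrative; based on how the routines in
 * this file depend on each other, not copied from a specific driver):
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev); // readless reg reads
 *	rc = ena_com_validate_version(ena_dev);
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers); // admin SQ/CQ/AENQ
 *	rc = ena_com_get_dev_attr_feat(ena_dev, &get_feat_ctx);
 *
 * ena_com_admin_init() requires working register reads, and
 * ena_com_get_dev_attr_feat() requires a running admin queue.
 */
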
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));

	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_EXT,
					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
		if (rc)
			return rc;

		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
			return ENA_COM_INVAL;

		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
		       sizeof(get_resp.u.max_queue_ext));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;

		if (rc)
			return rc;
	}

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command, so if the command
	 * isn't supported set driver hints to 0
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}
/* ena_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}
/* ena_com_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u64 timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE8(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp = (u64)aenq_common->timestamp_low |
			    ((u64)aenq_common->timestamp_high << 32);

		ena_trc_dbg(ena_dev, "AENQ! Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n",
			    aenq_common->group,
			    aenq_common->syndrome,
			    timestamp);

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32_RELAXED(ena_dev->bus, (u32)aenq->head,
				ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
	mmiowb();
}
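/*
 * Usage sketch (illustrative, handler names are hypothetical): the dispatch
 * above walks ena_dev->aenq.aenq_handlers, so a driver registers its
 * callbacks in a struct ena_aenq_handlers and passes it to
 * ena_com_admin_init(); groups without a registered callback fall back to
 * unimplemented_handler.
 *
 *	static void demo_link_change_cb(void *data,
 *					struct ena_admin_aenq_entry *aenq_e)
 *	{
 *		... react to the link state notification ...
 *	}
 *
 *	static void demo_unimplemented_cb(void *data,
 *					  struct ena_admin_aenq_entry *aenq_e)
 *	{
 *		... log the unhandled group ...
 *	}
 *
 *	static struct ena_aenq_handlers demo_aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = demo_link_change_cb,
 *		},
 *		.unimplemented_handler = demo_unimplemented_cb,
 *	};
 */
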
#ifdef ENA_EXTENDED_STATS
/* ena_com_extended_stats_set_func_queue:
 * Sets the function index and queue index to be used for the
 * get full statistics feature
 */
int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
					  u32 func_queue)
{
	/* The function and queue are acquired from the user in the
	 * following format:
	 * Bottom half-word: function
	 * Top half-word: queue
	 */
	ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
	ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);

	return 0;
}
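/*
 * Example (illustrative): following the packing described above, selecting
 * function 0 and queue 3 would be encoded as:
 *
 *	u32 func_queue = (3u << 16) | 0u;	queue in the top half-word,
 *						function in the bottom one
 *	ena_com_extended_stats_set_func_queue(ena_dev, func_queue);
 */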

#endif /* ENA_EXTENDED_STATS */
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err(ena_dev, "Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err(ena_dev, "Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err(ena_dev, "Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err(ena_dev, "Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err(ena_dev, "Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of the timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}
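/*
 * Usage sketch (illustrative): a recovery path typically resets with a
 * reason from enum ena_regs_reset_reason_types and then re-runs the full
 * MMIO/admin-queue init sequence; error handling is elided here.
 *
 *	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_OS_TRIGGER);
 *	if (rc)
 *		return rc;
 */
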
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(ctx->get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(ctx->get_resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to get stats. error: %d\n", ret);

	return ret;
}
int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
			  struct ena_admin_eni_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.u.eni_stats,
		       sizeof(ctx.get_resp.u.eni_stats));

	return ret;
}
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.u.basic_stats,
		       sizeof(ctx.get_resp.u.basic_stats));

	return ret;
}
#ifdef ENA_EXTENDED_STATS

int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
				   u32 len)
{
	struct ena_com_stats_ctx ctx;
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
	ena_mem_handle_t mem_handle;
	void *virt_addr;
	dma_addr_t phys_addr;
	int ret;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
			       virt_addr, phys_addr, mem_handle);
	if (!virt_addr) {
		ret = ENA_COM_NO_MEM;
		goto done;
	}
	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd->u.control_buffer.address,
				   phys_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		goto free_ext_stats_mem;
	}
	get_cmd->u.control_buffer.length = len;

	get_cmd->device_id = ena_dev->stats_func;
	get_cmd->queue_idx = ena_dev->stats_queue;

	ret = ena_get_dev_stats(ena_dev, &ctx,
				ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
	if (ret < 0)
		goto free_ext_stats_mem;

	ret = snprintf(buff, len, "%s", (char *)virt_addr);

free_ext_stats_mem:
	ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
			      mem_handle);
done:
	return ret;
}
#endif
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = (u32)mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_UNSUPPORTED;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		ena_trc_err(ena_dev, "Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key;
	struct ena_admin_get_feat_resp get_resp;
	enum ena_admin_hash_functions old_func;
	struct ena_rss *rss = &ena_dev->rss;
	int rc;

	hash_key = rss->hash_key;

	/* Make sure the key size is a multiple of DWORDs */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err(ena_dev, "Flow hash function %d isn't supported\n", func);
		return ENA_COM_UNSUPPORTED;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key) {
			if (key_len != sizeof(hash_key->key)) {
				ena_trc_err(ena_dev, "key len (%hu) doesn't equal the supported size (%zu)\n",
					    key_len, sizeof(hash_key->key));
				return ENA_COM_INVAL;
			}
			memcpy(hash_key->key, key, key_len);
			rss->hash_init_val = init_val;
			hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
		}
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		ena_trc_err(ena_dev, "Invalid hash function (%d)\n", func);
		return ENA_COM_INVAL;
	}

	old_func = rss->hash_func;
	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		rss->hash_func = old_func;

	return rc;
}
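/*
 * Example (illustrative): selecting Toeplitz with a caller-supplied key.
 * Passing key == NULL keeps the current key and only switches the function;
 * demo_rss_key is a hypothetical 40-byte (DWORD-multiple) buffer matching
 * sizeof(hash_key->key), and 0xFFFFFFFF is an arbitrary init value.
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					demo_rss_key, sizeof(demo_rss_key),
 *					0xFFFFFFFF);
 */
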
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	if (unlikely(!func))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	/* ENA_FFS() returns 1 in case the lsb is set */
	rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
	if (rss->hash_func)
		rss->hash_func--;

	*func = rss->hash_func;

	return 0;
}
int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		ena_dev->rss.hash_key;

	if (key)
		memcpy(key, hash_key->key,
		       (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));

	return 0;
}
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl), 0);
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to set hash input. error: %d\n", ret);

	return ret;
}
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err(ena_dev, "Hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_UNSUPPORTED;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err(ena_dev, "Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err(ena_dev, "Proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}
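/*
 * Example (illustrative): spreading the 2^tbl_log_size entries across
 * num_queues RX queues round-robin before pushing the table with
 * ena_com_indirect_table_set(); num_queues is a hypothetical driver value.
 *
 *	for (i = 0; i < (1 << ena_dev->rss.tbl_log_size); i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_queues);
 *		if (unlikely(rc))
 *			return rc;
 *	}
 */
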
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
		return ENA_COM_UNSUPPORTED;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (u32)(1ULL << rss->tbl_log_size) *
				    sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to set indirect table. error: %d\n", ret);

	return ret;
}
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (u32)(1ULL << rss->tbl_log_size) *
		   sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	/* The following function might return unsupported in case the
	 * device doesn't support setting the key / hash function. We can safely
	 * ignore this error and have indirection table support only.
	 */
	rc = ena_com_hash_key_allocate(ena_dev);
	if (likely(!rc))
		ena_com_hash_key_fill_default_key(ena_dev);
	else if (rc != ENA_COM_UNSUPPORTED)
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:

	return rc;
}
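/*
 * Usage sketch (illustrative): a typical RSS bring-up built from the
 * helpers in this file; the log size 7 (a 128-entry table) is an example
 * value.
 *
 *	rc = ena_com_rss_init(ena_dev, 7);
 *	if (unlikely(rc))
 *		return rc;
 *	... fill the indirection table with ena_com_indirect_table_fill_entry(),
 *	    optionally set the key/function and the hash input ...
 *	rc = ena_com_indirect_table_set(ena_dev);
 */
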
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       SZ_4K,
			       host_attr->host_info,
			       host_attr->host_info_dma_addr,
			       host_attr->host_info_dma_handle);
	if (unlikely(!host_attr->host_info))
		return ENA_COM_NO_MEM;

	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		(ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       debug_area_size,
			       host_attr->debug_area_virt_addr,
			       host_attr->debug_area_dma_addr,
			       host_attr->debug_area_dma_handle);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return ENA_COM_NO_MEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      SZ_4K,
				      host_attr->host_info,
				      host_attr->host_info_dma_addr,
				      host_attr->host_info_dma_handle);
		host_attr->host_info = NULL;
	}
}
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      host_attr->debug_area_size,
				      host_attr->debug_area_virt_addr,
				      host_attr->debug_area_dma_addr,
				      host_attr->debug_area_dma_handle);
		host_attr->debug_area_virt_addr = NULL;
	}
}
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */
	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to set host attributes: %d\n", ret);

	return ret;
}
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
							  u32 coalesce_usecs,
							  u32 intr_delay_resolution,
							  u32 *intr_moder_interval)
{
	if (!intr_delay_resolution) {
		ena_trc_err(ena_dev, "Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;

	return 0;
}
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
							      tx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_tx_interval);
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
							      rx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_rx_interval);
}
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);

	if (rc) {
		if (rc == ENA_COM_UNSUPPORTED) {
			ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
				    ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err(ena_dev,
				    "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);

		return rc;
	}

	/* if moderation is supported by the device, update the interrupt delay resolution */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);

	/* Disable adaptive moderation by default - can be enabled later */
	ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_rx_interval;
}
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int rc;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (unlikely(ena_dev->tx_max_header_size == 0)) {
		ena_trc_err(ena_dev, "The size of the LLQ entry is smaller than needed\n");
		return ENA_COM_INVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

	return 0;
}
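
/*
 * Worked example (illustrative numbers): with a 128-byte LLQ descriptor
 * list entry and two descriptors reserved before the packet header,
 *
 *	tx_max_header_size = 128 - 2 * sizeof(struct ena_eth_io_tx_desc)
 *			   = 128 - 2 * 16 = 96 bytes
 *
 * assuming the common 16-byte TX descriptor layout.
 */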