/*
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
36 /*****************************************************************************/
37 /*****************************************************************************/
39 /* Timeout in micro-sec */
40 #define ADMIN_CMD_TIMEOUT_US (3000000)
42 #define ENA_ASYNC_QUEUE_DEPTH 16
43 #define ENA_ADMIN_QUEUE_DEPTH 32
45 #ifdef ENA_EXTENDED_STATS
47 #define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
48 #define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
49 #define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
51 #endif /* ENA_EXTENDED_STATS */
53 #define ENA_CTRL_MAJOR 0
54 #define ENA_CTRL_MINOR 0
55 #define ENA_CTRL_SUB_MINOR 1
#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))
64 #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
65 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
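/* Worked example (illustrative values, not from the original source): for a
 * 64-bit DMA address 0x0000001234567890, ENA_DMA_ADDR_TO_UINT32_LOW() yields
 * 0x34567890 and ENA_DMA_ADDR_TO_UINT32_HIGH() yields 0x00000012, which are
 * the two halves written to the *_BASE_LO/*_BASE_HI device registers below.
 */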
67 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
69 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
71 #define ENA_REGS_ADMIN_INTR_MASK 1
73 #define ENA_MIN_POLL_US 100
75 #define ENA_MAX_POLL_US 5000
77 /*****************************************************************************/
78 /*****************************************************************************/
79 /*****************************************************************************/
84 /* Abort - canceled by the driver */
89 ena_wait_event_t wait_event;
90 struct ena_admin_acq_entry *user_cqe;
92 enum ena_cmd_status status;
93 /* status from the device */
99 struct ena_com_stats_ctx {
100 struct ena_admin_aq_get_stats_cmd get_cmd;
101 struct ena_admin_acq_get_stats_resp get_resp;
static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				struct ena_common_mem_addr *ena_addr,
				dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err("dma address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}
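/* Illustrative note (assumes dma_addr_bits was read from the device CAPS
 * register, see ena_com_get_dma_width() below): with dma_addr_bits == 48,
 * GENMASK_ULL(47, 0) == 0x0000FFFFFFFFFFFF, so any address that sets a bit
 * above bit 47 fails the check above and the function returns ENA_COM_INVAL.
 */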
119 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
121 struct ena_com_admin_sq *sq = &queue->sq;
122 u16 size = ADMIN_SQ_SIZE(queue->q_depth);
124 ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
128 ena_trc_err("memory allocation failed\n");
129 return ENA_COM_NO_MEM;
141 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
143 struct ena_com_admin_cq *cq = &queue->cq;
144 u16 size = ADMIN_CQ_SIZE(queue->q_depth);
146 ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
150 ena_trc_err("memory allocation failed\n");
151 return ENA_COM_NO_MEM;
160 static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
161 struct ena_aenq_handlers *aenq_handlers)
163 struct ena_com_aenq *aenq = &dev->aenq;
164 u32 addr_low, addr_high, aenq_caps;
167 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
168 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
169 ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
174 if (!aenq->entries) {
175 ena_trc_err("memory allocation failed\n");
176 return ENA_COM_NO_MEM;
179 aenq->head = aenq->q_depth;
182 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
183 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
185 ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
186 ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
189 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
190 aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
191 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
192 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
193 ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
195 if (unlikely(!aenq_handlers)) {
196 ena_trc_err("aenq handlers pointer is NULL\n");
197 return ENA_COM_INVAL;
200 aenq->aenq_handlers = aenq_handlers;
205 static void comp_ctxt_release(struct ena_com_admin_queue *queue,
206 struct ena_comp_ctx *comp_ctx)
208 comp_ctx->occupied = false;
209 ATOMIC32_DEC(&queue->outstanding_cmds);
212 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
213 u16 command_id, bool capture)
215 if (unlikely(command_id >= queue->q_depth)) {
216 ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
217 command_id, queue->q_depth);
221 if (unlikely(!queue->comp_ctx)) {
222 ena_trc_err("Completion context is NULL\n");
226 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
227 ena_trc_err("Completion context is occupied\n");
232 ATOMIC32_INC(&queue->outstanding_cmds);
233 queue->comp_ctx[command_id].occupied = true;
236 return &queue->comp_ctx[command_id];
239 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
240 struct ena_admin_aq_entry *cmd,
241 size_t cmd_size_in_bytes,
242 struct ena_admin_acq_entry *comp,
243 size_t comp_size_in_bytes)
245 struct ena_comp_ctx *comp_ctx;
246 u16 tail_masked, cmd_id;
250 queue_size_mask = admin_queue->q_depth - 1;
252 tail_masked = admin_queue->sq.tail & queue_size_mask;
254 /* In case of queue FULL */
255 cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
256 if (cnt >= admin_queue->q_depth) {
257 ena_trc_dbg("admin queue is full.\n");
258 admin_queue->stats.out_of_space++;
259 return ERR_PTR(ENA_COM_NO_SPACE);
262 cmd_id = admin_queue->curr_cmd_id;
264 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
265 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
267 cmd->aq_common_descriptor.command_id |= cmd_id &
268 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
270 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
271 if (unlikely(!comp_ctx))
272 return ERR_PTR(ENA_COM_INVAL);
274 comp_ctx->status = ENA_CMD_SUBMITTED;
275 comp_ctx->comp_size = (u32)comp_size_in_bytes;
276 comp_ctx->user_cqe = comp;
277 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
279 ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
281 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
283 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
286 admin_queue->sq.tail++;
287 admin_queue->stats.submitted_cmd++;
289 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
290 admin_queue->sq.phase = !admin_queue->sq.phase;
292 ENA_DB_SYNC(&admin_queue->sq.mem_handle);
293 ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
294 admin_queue->sq.db_addr);
299 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
301 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
302 struct ena_comp_ctx *comp_ctx;
305 queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
306 if (unlikely(!queue->comp_ctx)) {
307 ena_trc_err("memory allocation failed\n");
308 return ENA_COM_NO_MEM;
311 for (i = 0; i < queue->q_depth; i++) {
312 comp_ctx = get_comp_ctxt(queue, i, false);
314 ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
320 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
321 struct ena_admin_aq_entry *cmd,
322 size_t cmd_size_in_bytes,
323 struct ena_admin_acq_entry *comp,
324 size_t comp_size_in_bytes)
326 unsigned long flags = 0;
327 struct ena_comp_ctx *comp_ctx;
329 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
330 if (unlikely(!admin_queue->running_state)) {
331 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
332 return ERR_PTR(ENA_COM_NO_DEVICE);
334 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
338 if (IS_ERR(comp_ctx))
339 admin_queue->running_state = false;
340 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
345 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
346 struct ena_com_create_io_ctx *ctx,
347 struct ena_com_io_sq *io_sq)
352 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
354 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
355 io_sq->desc_entry_size =
356 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
357 sizeof(struct ena_eth_io_tx_desc) :
358 sizeof(struct ena_eth_io_rx_desc);
360 size = io_sq->desc_entry_size * io_sq->q_depth;
361 io_sq->bus = ena_dev->bus;
363 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
364 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
366 io_sq->desc_addr.virt_addr,
367 io_sq->desc_addr.phys_addr,
368 io_sq->desc_addr.mem_handle,
371 if (!io_sq->desc_addr.virt_addr) {
372 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
374 io_sq->desc_addr.virt_addr,
375 io_sq->desc_addr.phys_addr,
376 io_sq->desc_addr.mem_handle);
379 if (!io_sq->desc_addr.virt_addr) {
380 ena_trc_err("memory allocation failed\n");
381 return ENA_COM_NO_MEM;
385 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
386 /* Allocate bounce buffers */
387 io_sq->bounce_buf_ctrl.buffer_size =
388 ena_dev->llq_info.desc_list_entry_size;
389 io_sq->bounce_buf_ctrl.buffers_num =
390 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
391 io_sq->bounce_buf_ctrl.next_to_use = 0;
393 size = io_sq->bounce_buf_ctrl.buffer_size *
394 io_sq->bounce_buf_ctrl.buffers_num;
396 ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
398 io_sq->bounce_buf_ctrl.base_buffer,
401 if (!io_sq->bounce_buf_ctrl.base_buffer)
402 io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
404 if (!io_sq->bounce_buf_ctrl.base_buffer) {
405 ena_trc_err("bounce buffer memory allocation failed\n");
406 return ENA_COM_NO_MEM;
409 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
410 sizeof(io_sq->llq_info));
412 /* Initiate the first bounce buffer */
413 io_sq->llq_buf_ctrl.curr_bounce_buf =
414 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
415 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
416 0x0, io_sq->llq_info.desc_list_entry_size);
417 io_sq->llq_buf_ctrl.descs_left_in_line =
418 io_sq->llq_info.descs_num_before_header;
419 io_sq->disable_meta_caching =
420 io_sq->llq_info.disable_meta_caching;
422 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
423 io_sq->entries_in_tx_burst_left =
424 io_sq->llq_info.max_entries_in_tx_burst;
428 io_sq->next_to_comp = 0;
434 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
435 struct ena_com_create_io_ctx *ctx,
436 struct ena_com_io_cq *io_cq)
441 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
443 /* Use the basic completion descriptor for Rx */
444 io_cq->cdesc_entry_size_in_bytes =
445 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
446 sizeof(struct ena_eth_io_tx_cdesc) :
447 sizeof(struct ena_eth_io_rx_cdesc_base);
449 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
450 io_cq->bus = ena_dev->bus;
452 ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
454 io_cq->cdesc_addr.virt_addr,
455 io_cq->cdesc_addr.phys_addr,
456 io_cq->cdesc_addr.mem_handle,
459 ENA_CDESC_RING_SIZE_ALIGNMENT);
460 if (!io_cq->cdesc_addr.virt_addr) {
461 ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
463 io_cq->cdesc_addr.virt_addr,
464 io_cq->cdesc_addr.phys_addr,
465 io_cq->cdesc_addr.mem_handle,
466 ENA_CDESC_RING_SIZE_ALIGNMENT);
469 if (!io_cq->cdesc_addr.virt_addr) {
470 ena_trc_err("memory allocation failed\n");
471 return ENA_COM_NO_MEM;
480 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
481 struct ena_admin_acq_entry *cqe)
483 struct ena_comp_ctx *comp_ctx;
486 cmd_id = cqe->acq_common_descriptor.command &
487 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
489 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
490 if (unlikely(!comp_ctx)) {
491 ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
492 admin_queue->running_state = false;
496 comp_ctx->status = ENA_CMD_COMPLETED;
497 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
499 if (comp_ctx->user_cqe)
500 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
502 if (!admin_queue->polling)
503 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
506 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
508 struct ena_admin_acq_entry *cqe = NULL;
513 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
514 phase = admin_queue->cq.phase;
516 cqe = &admin_queue->cq.entries[head_masked];
518 /* Go over all the completions */
519 while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
520 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit has been validated.
		 */
525 ena_com_handle_single_admin_completion(admin_queue, cqe);
529 if (unlikely(head_masked == admin_queue->q_depth)) {
534 cqe = &admin_queue->cq.entries[head_masked];
537 admin_queue->cq.head += comp_num;
538 admin_queue->cq.phase = phase;
539 admin_queue->sq.head += comp_num;
540 admin_queue->stats.completed_cmd += comp_num;
543 static int ena_com_comp_status_to_errno(u8 comp_status)
545 if (unlikely(comp_status != 0))
546 ena_trc_err("admin command failed[%u]\n", comp_status);
548 switch (comp_status) {
549 case ENA_ADMIN_SUCCESS:
551 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
552 return ENA_COM_NO_MEM;
553 case ENA_ADMIN_UNSUPPORTED_OPCODE:
554 return ENA_COM_UNSUPPORTED;
555 case ENA_ADMIN_BAD_OPCODE:
556 case ENA_ADMIN_MALFORMED_REQUEST:
557 case ENA_ADMIN_ILLEGAL_PARAMETER:
558 case ENA_ADMIN_UNKNOWN_ERROR:
559 return ENA_COM_INVAL;
562 return ENA_COM_INVAL;
565 static inline void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
567 delay_us = ENA_MAX32(ENA_MIN_POLL_US, delay_us);
568 delay_us = ENA_MIN32(delay_us * (1 << exp), ENA_MAX_POLL_US);
569 ENA_USLEEP(delay_us);
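	/* Worked example of the backoff above, using the ENA_MIN_POLL_US (100)
	 * and ENA_MAX_POLL_US (5000) bounds defined at the top of this file and
	 * assuming the caller passes delay_us == 0 while incrementing exp on
	 * every retry:
	 *   exp = 0 -> 100 us, exp = 1 -> 200 us, exp = 2 -> 400 us, ...
	 *   exp = 5 -> 3200 us, exp >= 6 -> capped at 5000 us.
	 */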
572 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
573 struct ena_com_admin_queue *admin_queue)
575 unsigned long flags = 0;
580 timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
583 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
584 ena_com_handle_admin_completion(admin_queue);
585 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
587 if (comp_ctx->status != ENA_CMD_SUBMITTED)
590 if (ENA_TIME_EXPIRE(timeout)) {
591 ena_trc_err("Wait for completion (polling) timeout\n");
592 /* ENA didn't have any completion */
593 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
594 admin_queue->stats.no_completion++;
595 admin_queue->running_state = false;
596 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
598 ret = ENA_COM_TIMER_EXPIRED;
602 ena_delay_exponential_backoff_us(exp++, admin_queue->ena_dev->ena_min_poll_delay_us);
605 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
606 ena_trc_err("Command was aborted\n");
607 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
608 admin_queue->stats.aborted_cmd++;
609 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
610 ret = ENA_COM_NO_DEVICE;
614 ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
615 "Invalid comp status %d\n", comp_ctx->status);
617 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
619 comp_ctxt_release(admin_queue, comp_ctx);
/* Set the LLQ configurations of the firmware
 *
 * The driver provides only the enabled feature values to the device,
 * which, in turn, checks if they are supported.
 */
629 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
631 struct ena_com_admin_queue *admin_queue;
632 struct ena_admin_set_feat_cmd cmd;
633 struct ena_admin_set_feat_resp resp;
634 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
637 memset(&cmd, 0x0, sizeof(cmd));
638 admin_queue = &ena_dev->admin_queue;
640 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
641 cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
643 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
644 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
645 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
646 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
648 if (llq_info->disable_meta_caching)
649 cmd.u.llq.accel_mode.u.set.enabled_flags |=
650 BIT(ENA_ADMIN_DISABLE_META_CACHING);
652 if (llq_info->max_entries_in_tx_burst)
653 cmd.u.llq.accel_mode.u.set.enabled_flags |=
654 BIT(ENA_ADMIN_LIMIT_TX_BURST);
656 ret = ena_com_execute_admin_command(admin_queue,
657 (struct ena_admin_aq_entry *)&cmd,
659 (struct ena_admin_acq_entry *)&resp,
663 ena_trc_err("Failed to set LLQ configurations: %d\n", ret);
668 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
669 struct ena_admin_feature_llq_desc *llq_features,
670 struct ena_llq_configurations *llq_default_cfg)
672 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
676 memset(llq_info, 0, sizeof(*llq_info));
678 supported_feat = llq_features->header_location_ctrl_supported;
680 if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
681 llq_info->header_location_ctrl =
682 llq_default_cfg->llq_header_location;
684 ena_trc_err("Invalid header location control, supported: 0x%x\n",
689 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
690 supported_feat = llq_features->descriptors_stride_ctrl_supported;
691 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
692 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
694 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
695 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
696 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
697 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
699 ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
704 ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
705 llq_default_cfg->llq_stride_ctrl,
707 llq_info->desc_stride_ctrl);
710 llq_info->desc_stride_ctrl = 0;
713 supported_feat = llq_features->entry_size_ctrl_supported;
714 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
715 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
716 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
718 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
719 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
720 llq_info->desc_list_entry_size = 128;
721 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
722 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
723 llq_info->desc_list_entry_size = 192;
724 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
725 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
726 llq_info->desc_list_entry_size = 256;
728 ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
732 ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
733 llq_default_cfg->llq_ring_entry_size,
735 llq_info->desc_list_entry_size);
737 if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8.
		 * This requirement comes from __iowrite64_copy().
		 */
741 ena_trc_err("illegal entry size %d\n",
742 llq_info->desc_list_entry_size);
746 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
747 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
748 sizeof(struct ena_eth_io_tx_desc);
750 llq_info->descs_per_entry = 1;
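	/* Illustrative numbers (the 16-byte Tx descriptor size is an assumption,
	 * not taken from this file): with a 128B LLQ entry and
	 * ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY, descs_per_entry = 128 / 16 = 8,
	 * while any other stride control leaves descs_per_entry at 1.
	 */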
752 supported_feat = llq_features->desc_num_before_header_supported;
753 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
754 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
756 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
757 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
758 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
759 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
760 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
761 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
762 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
763 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
765 ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",
770 ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
771 llq_default_cfg->llq_num_decs_before_header,
773 llq_info->descs_num_before_header);
	/* Check if the accelerated queue is supported */
776 llq_info->disable_meta_caching =
777 llq_features->accel_mode.u.get.supported_flags &
778 BIT(ENA_ADMIN_DISABLE_META_CACHING);
780 if (llq_features->accel_mode.u.get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
781 llq_info->max_entries_in_tx_burst =
782 llq_features->accel_mode.u.get.max_tx_burst_size /
783 llq_default_cfg->llq_ring_entry_size_value;
785 rc = ena_com_set_llq(ena_dev);
787 ena_trc_err("Cannot set LLQ configuration: %d\n", rc);
792 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
793 struct ena_com_admin_queue *admin_queue)
795 unsigned long flags = 0;
798 ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
799 admin_queue->completion_timeout);
	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't receive the MSI-X interrupt.
	 */
806 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
807 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
808 ena_com_handle_admin_completion(admin_queue);
809 admin_queue->stats.no_completion++;
810 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
812 if (comp_ctx->status == ENA_CMD_COMPLETED) {
813 ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
814 comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
815 /* Check if fallback to polling is enabled */
816 if (admin_queue->auto_polling)
817 admin_queue->polling = true;
819 ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
820 comp_ctx->cmd_opcode, comp_ctx->status);
		/* Check if the driver shifted to polling mode.
		 * This happens when there is a completion without an interrupt
		 * and auto-polling mode is enabled. Continue normal execution in that case.
		 */
826 if (!admin_queue->polling) {
827 admin_queue->running_state = false;
828 ret = ENA_COM_TIMER_EXPIRED;
833 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
835 comp_ctxt_release(admin_queue, comp_ctx);
/* This method reads a hardware device register by posting a write
 * and waiting for the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
 */
843 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
845 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
846 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
847 mmio_read->read_resp;
848 u32 mmio_read_reg, ret, i;
849 unsigned long flags = 0;
850 u32 timeout = mmio_read->reg_read_to;
855 timeout = ENA_REG_READ_TIMEOUT;
857 /* If readless is disabled, perform regular read */
858 if (!mmio_read->readless_supported)
859 return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
861 ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
862 mmio_read->seq_num++;
864 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
865 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
866 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
867 mmio_read_reg |= mmio_read->seq_num &
868 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
870 ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
871 ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
873 for (i = 0; i < timeout; i++) {
874 if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
880 if (unlikely(i == timeout)) {
881 ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
886 ret = ENA_MMIO_READ_TIMEOUT;
890 if (read_resp->reg_off != offset) {
891 ena_trc_err("Read failure: wrong offset provided\n");
892 ret = ENA_MMIO_READ_TIMEOUT;
894 ret = read_resp->reg_val;
897 ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
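	/* Typical usage of the readless read above, as done elsewhere in this
	 * file (e.g. ena_com_validate_version() and ena_com_admin_init()):
	 *
	 *	u32 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	 *
	 *	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT))
	 *		return ENA_COM_TIMER_EXPIRED;
	 *
	 * i.e. every caller must treat ENA_MMIO_READ_TIMEOUT as a failed read.
	 */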
/* There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on a wait queue until the completion is ready
 * (or the timeout expires).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
909 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
910 struct ena_com_admin_queue *admin_queue)
912 if (admin_queue->polling)
913 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
916 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
920 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
921 struct ena_com_io_sq *io_sq)
923 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
924 struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
925 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
929 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
931 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
932 direction = ENA_ADMIN_SQ_DIRECTION_TX;
934 direction = ENA_ADMIN_SQ_DIRECTION_RX;
936 destroy_cmd.sq.sq_identity |= (direction <<
937 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
938 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
940 destroy_cmd.sq.sq_idx = io_sq->idx;
941 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
943 ret = ena_com_execute_admin_command(admin_queue,
944 (struct ena_admin_aq_entry *)&destroy_cmd,
946 (struct ena_admin_acq_entry *)&destroy_resp,
947 sizeof(destroy_resp));
949 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
950 ena_trc_err("failed to destroy io sq error: %d\n", ret);
955 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
956 struct ena_com_io_sq *io_sq,
957 struct ena_com_io_cq *io_cq)
961 if (io_cq->cdesc_addr.virt_addr) {
962 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
964 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
966 io_cq->cdesc_addr.virt_addr,
967 io_cq->cdesc_addr.phys_addr,
968 io_cq->cdesc_addr.mem_handle);
970 io_cq->cdesc_addr.virt_addr = NULL;
973 if (io_sq->desc_addr.virt_addr) {
974 size = io_sq->desc_entry_size * io_sq->q_depth;
976 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
978 io_sq->desc_addr.virt_addr,
979 io_sq->desc_addr.phys_addr,
980 io_sq->desc_addr.mem_handle);
982 io_sq->desc_addr.virt_addr = NULL;
985 if (io_sq->bounce_buf_ctrl.base_buffer) {
986 ENA_MEM_FREE(ena_dev->dmadev,
987 io_sq->bounce_buf_ctrl.base_buffer,
988 (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
989 io_sq->bounce_buf_ctrl.base_buffer = NULL;
993 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
997 ena_time_t timeout_stamp;
999 /* Convert timeout from resolution of 100ms to us resolution. */
1000 timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout);
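	/* For example (values assumed for illustration): a timeout argument of
	 * 3 means 3 * 100 ms = 300 ms, i.e. a deadline of 300,000 us from now.
	 */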
1003 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1005 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
1006 ena_trc_err("Reg read timeout occurred\n");
1007 return ENA_COM_TIMER_EXPIRED;
1010 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
1014 if (ENA_TIME_EXPIRE(timeout_stamp))
1015 return ENA_COM_TIMER_EXPIRED;
1017 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1021 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
1022 enum ena_admin_aq_feature_id feature_id)
1024 u32 feature_mask = 1 << feature_id;
	/* Device attributes are always supported */
1027 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
1028 !(ena_dev->supported_features & feature_mask))
1034 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
1035 struct ena_admin_get_feat_resp *get_resp,
1036 enum ena_admin_aq_feature_id feature_id,
1037 dma_addr_t control_buf_dma_addr,
1038 u32 control_buff_size,
1041 struct ena_com_admin_queue *admin_queue;
1042 struct ena_admin_get_feat_cmd get_cmd;
1045 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
1046 ena_trc_dbg("Feature %d isn't supported\n", feature_id);
1047 return ENA_COM_UNSUPPORTED;
1050 memset(&get_cmd, 0x0, sizeof(get_cmd));
1051 admin_queue = &ena_dev->admin_queue;
1053 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1055 if (control_buff_size)
1056 get_cmd.aq_common_descriptor.flags =
1057 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1059 get_cmd.aq_common_descriptor.flags = 0;
1061 ret = ena_com_mem_addr_set(ena_dev,
1062 &get_cmd.control_buffer.address,
1063 control_buf_dma_addr);
1064 if (unlikely(ret)) {
1065 ena_trc_err("memory address set failed\n");
1069 get_cmd.control_buffer.length = control_buff_size;
1070 get_cmd.feat_common.feature_version = feature_ver;
1071 get_cmd.feat_common.feature_id = feature_id;
1073 ret = ena_com_execute_admin_command(admin_queue,
1074 (struct ena_admin_aq_entry *)
1077 (struct ena_admin_acq_entry *)
1082 ena_trc_err("Failed to submit get_feature command %d error: %d\n",
1088 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1089 struct ena_admin_get_feat_resp *get_resp,
1090 enum ena_admin_aq_feature_id feature_id,
1093 return ena_com_get_feature_ex(ena_dev,
1101 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1103 return ena_dev->rss.hash_func;
1106 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1108 struct ena_admin_feature_rss_flow_hash_control *hash_key =
1109 (ena_dev->rss).hash_key;
1111 ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
	/* The key buffer is stored in the device as an array of
	 * uint32 elements, so the number of elements can be derived by
	 * dividing the buffer length by the size of each array element.
	 * In the current implementation each element is a uint32_t, so this
	 * is effectively a division by 4, but if the element size ever
	 * changes there is no need to rewrite this code.
	 */
1119 hash_key->keys_num = sizeof(hash_key->key) / sizeof(hash_key->key[0]);
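	/* Example (the 40-byte key size is an assumption about the ENA default
	 * RSS key and is not taken from this file): with a 40-byte key stored
	 * as u32 words, keys_num = 40 / sizeof(u32) = 10, which is the element
	 * count handed to the device in the hash control structure.
	 */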
1122 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1124 struct ena_rss *rss = &ena_dev->rss;
1126 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
1127 return ENA_COM_UNSUPPORTED;
1129 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1130 sizeof(*rss->hash_key),
1132 rss->hash_key_dma_addr,
1133 rss->hash_key_mem_handle);
1135 if (unlikely(!rss->hash_key))
1136 return ENA_COM_NO_MEM;
1141 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1143 struct ena_rss *rss = &ena_dev->rss;
1146 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1147 sizeof(*rss->hash_key),
1149 rss->hash_key_dma_addr,
1150 rss->hash_key_mem_handle);
1151 rss->hash_key = NULL;
1154 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1156 struct ena_rss *rss = &ena_dev->rss;
1158 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1159 sizeof(*rss->hash_ctrl),
1161 rss->hash_ctrl_dma_addr,
1162 rss->hash_ctrl_mem_handle);
1164 if (unlikely(!rss->hash_ctrl))
1165 return ENA_COM_NO_MEM;
1170 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1172 struct ena_rss *rss = &ena_dev->rss;
1175 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1176 sizeof(*rss->hash_ctrl),
1178 rss->hash_ctrl_dma_addr,
1179 rss->hash_ctrl_mem_handle);
1180 rss->hash_ctrl = NULL;
1183 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1186 struct ena_rss *rss = &ena_dev->rss;
1187 struct ena_admin_get_feat_resp get_resp;
1191 ret = ena_com_get_feature(ena_dev, &get_resp,
1192 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
1196 if ((get_resp.u.ind_table.min_size > log_size) ||
1197 (get_resp.u.ind_table.max_size < log_size)) {
1198 ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1200 1 << get_resp.u.ind_table.min_size,
1201 1 << get_resp.u.ind_table.max_size);
1202 return ENA_COM_INVAL;
1205 tbl_size = (1ULL << log_size) *
1206 sizeof(struct ena_admin_rss_ind_table_entry);
1208 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1211 rss->rss_ind_tbl_dma_addr,
1212 rss->rss_ind_tbl_mem_handle);
1213 if (unlikely(!rss->rss_ind_tbl))
1216 tbl_size = (1ULL << log_size) * sizeof(u16);
1217 rss->host_rss_ind_tbl =
1218 ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
1219 if (unlikely(!rss->host_rss_ind_tbl))
1222 rss->tbl_log_size = log_size;
1227 tbl_size = (1ULL << log_size) *
1228 sizeof(struct ena_admin_rss_ind_table_entry);
1230 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1233 rss->rss_ind_tbl_dma_addr,
1234 rss->rss_ind_tbl_mem_handle);
1235 rss->rss_ind_tbl = NULL;
1237 rss->tbl_log_size = 0;
1238 return ENA_COM_NO_MEM;
1241 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1243 struct ena_rss *rss = &ena_dev->rss;
1244 size_t tbl_size = (1ULL << rss->tbl_log_size) *
1245 sizeof(struct ena_admin_rss_ind_table_entry);
1247 if (rss->rss_ind_tbl)
1248 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1251 rss->rss_ind_tbl_dma_addr,
1252 rss->rss_ind_tbl_mem_handle);
1253 rss->rss_ind_tbl = NULL;
1255 if (rss->host_rss_ind_tbl)
1256 ENA_MEM_FREE(ena_dev->dmadev,
1257 rss->host_rss_ind_tbl,
1258 ((1ULL << rss->tbl_log_size) * sizeof(u16)));
1259 rss->host_rss_ind_tbl = NULL;
1262 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1263 struct ena_com_io_sq *io_sq, u16 cq_idx)
1265 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1266 struct ena_admin_aq_create_sq_cmd create_cmd;
1267 struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1271 memset(&create_cmd, 0x0, sizeof(create_cmd));
1273 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1275 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1276 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1278 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1280 create_cmd.sq_identity |= (direction <<
1281 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1282 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1284 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1285 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1287 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1288 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1289 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1291 create_cmd.sq_caps_3 |=
1292 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1294 create_cmd.cq_idx = cq_idx;
1295 create_cmd.sq_depth = io_sq->q_depth;
1297 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1298 ret = ena_com_mem_addr_set(ena_dev,
1300 io_sq->desc_addr.phys_addr);
1301 if (unlikely(ret)) {
1302 ena_trc_err("memory address set failed\n");
1307 ret = ena_com_execute_admin_command(admin_queue,
1308 (struct ena_admin_aq_entry *)&create_cmd,
1310 (struct ena_admin_acq_entry *)&cmd_completion,
1311 sizeof(cmd_completion));
1312 if (unlikely(ret)) {
1313 ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
1317 io_sq->idx = cmd_completion.sq_idx;
1319 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1320 (uintptr_t)cmd_completion.sq_doorbell_offset);
1322 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1323 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1324 + cmd_completion.llq_headers_offset);
1326 io_sq->desc_addr.pbuf_dev_addr =
1327 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1328 cmd_completion.llq_descriptors_offset);
1331 ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1336 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1338 struct ena_rss *rss = &ena_dev->rss;
1339 struct ena_com_io_sq *io_sq;
1343 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1344 qid = rss->host_rss_ind_tbl[i];
1345 if (qid >= ENA_TOTAL_NUM_QUEUES)
1346 return ENA_COM_INVAL;
1348 io_sq = &ena_dev->io_sq_queues[qid];
1350 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1351 return ENA_COM_INVAL;
1353 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1359 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1360 u16 intr_delay_resolution)
1362 u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
1364 if (unlikely(!intr_delay_resolution)) {
1365 ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1366 intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1370 ena_dev->intr_moder_rx_interval =
1371 ena_dev->intr_moder_rx_interval *
1372 prev_intr_delay_resolution /
1373 intr_delay_resolution;
1376 ena_dev->intr_moder_tx_interval =
1377 ena_dev->intr_moder_tx_interval *
1378 prev_intr_delay_resolution /
1379 intr_delay_resolution;
1381 ena_dev->intr_delay_resolution = intr_delay_resolution;
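	/* Worked example for the rescaling above (values assumed for
	 * illustration): if the Rx interval was 64 units at a previous
	 * resolution of 1 us and the device now reports a 4 us resolution, the
	 * stored interval becomes 64 * 1 / 4 = 16 units, i.e. the same 64 us of
	 * real time expressed in the new units.
	 */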
1384 /*****************************************************************************/
1385 /******************************* API ******************************/
1386 /*****************************************************************************/
1388 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1389 struct ena_admin_aq_entry *cmd,
1391 struct ena_admin_acq_entry *comp,
1394 struct ena_comp_ctx *comp_ctx;
1397 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1399 if (IS_ERR(comp_ctx)) {
1400 if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
1401 ena_trc_dbg("Failed to submit command [%ld]\n",
1404 ena_trc_err("Failed to submit command [%ld]\n",
1407 return PTR_ERR(comp_ctx);
1410 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1411 if (unlikely(ret)) {
1412 if (admin_queue->running_state)
1413 ena_trc_err("Failed to process command. ret = %d\n",
1416 ena_trc_dbg("Failed to process command. ret = %d\n",
1422 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1423 struct ena_com_io_cq *io_cq)
1425 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1426 struct ena_admin_aq_create_cq_cmd create_cmd;
1427 struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1430 memset(&create_cmd, 0x0, sizeof(create_cmd));
1432 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1434 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1435 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1436 create_cmd.cq_caps_1 |=
1437 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1439 create_cmd.msix_vector = io_cq->msix_vector;
1440 create_cmd.cq_depth = io_cq->q_depth;
1442 ret = ena_com_mem_addr_set(ena_dev,
1444 io_cq->cdesc_addr.phys_addr);
1445 if (unlikely(ret)) {
1446 ena_trc_err("memory address set failed\n");
1450 ret = ena_com_execute_admin_command(admin_queue,
1451 (struct ena_admin_aq_entry *)&create_cmd,
1453 (struct ena_admin_acq_entry *)&cmd_completion,
1454 sizeof(cmd_completion));
1455 if (unlikely(ret)) {
1456 ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
1460 io_cq->idx = cmd_completion.cq_idx;
1462 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1463 cmd_completion.cq_interrupt_unmask_register_offset);
1465 if (cmd_completion.cq_head_db_register_offset)
1466 io_cq->cq_head_db_reg =
1467 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1468 cmd_completion.cq_head_db_register_offset);
1470 if (cmd_completion.numa_node_register_offset)
1471 io_cq->numa_node_cfg_reg =
1472 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1473 cmd_completion.numa_node_register_offset);
1475 ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1480 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1481 struct ena_com_io_sq **io_sq,
1482 struct ena_com_io_cq **io_cq)
1484 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1485 ena_trc_err("Invalid queue number %d but the max is %d\n",
1486 qid, ENA_TOTAL_NUM_QUEUES);
1487 return ENA_COM_INVAL;
1490 *io_sq = &ena_dev->io_sq_queues[qid];
1491 *io_cq = &ena_dev->io_cq_queues[qid];
1496 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1498 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1499 struct ena_comp_ctx *comp_ctx;
1502 if (!admin_queue->comp_ctx)
1505 for (i = 0; i < admin_queue->q_depth; i++) {
1506 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1507 if (unlikely(!comp_ctx))
1510 comp_ctx->status = ENA_CMD_ABORTED;
1512 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
1516 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1518 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1519 unsigned long flags = 0;
1522 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1523 while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
1524 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1525 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1526 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1528 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1531 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1532 struct ena_com_io_cq *io_cq)
1534 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1535 struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1536 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1539 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1541 destroy_cmd.cq_idx = io_cq->idx;
1542 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1544 ret = ena_com_execute_admin_command(admin_queue,
1545 (struct ena_admin_aq_entry *)&destroy_cmd,
1546 sizeof(destroy_cmd),
1547 (struct ena_admin_acq_entry *)&destroy_resp,
1548 sizeof(destroy_resp));
1550 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
1551 ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
1556 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1558 return ena_dev->admin_queue.running_state;
1561 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1563 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1564 unsigned long flags = 0;
1566 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1567 ena_dev->admin_queue.running_state = state;
1568 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1571 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1573 u16 depth = ena_dev->aenq.q_depth;
1575 ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
1580 ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1583 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1585 struct ena_com_admin_queue *admin_queue;
1586 struct ena_admin_set_feat_cmd cmd;
1587 struct ena_admin_set_feat_resp resp;
1588 struct ena_admin_get_feat_resp get_resp;
1591 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1593 ena_trc_info("Can't get aenq configuration\n");
1597 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1598 ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1599 get_resp.u.aenq.supported_groups,
1601 return ENA_COM_UNSUPPORTED;
1604 memset(&cmd, 0x0, sizeof(cmd));
1605 admin_queue = &ena_dev->admin_queue;
1607 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1608 cmd.aq_common_descriptor.flags = 0;
1609 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1610 cmd.u.aenq.enabled_groups = groups_flag;
1612 ret = ena_com_execute_admin_command(admin_queue,
1613 (struct ena_admin_aq_entry *)&cmd,
1615 (struct ena_admin_acq_entry *)&resp,
1619 ena_trc_err("Failed to config AENQ ret: %d\n", ret);
1624 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1626 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1629 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1630 ena_trc_err("Reg read timeout occurred\n");
1631 return ENA_COM_TIMER_EXPIRED;
1634 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1635 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1637 ena_trc_dbg("ENA dma width: %d\n", width);
1639 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1640 ena_trc_err("DMA width illegal value: %d\n", width);
1641 return ENA_COM_INVAL;
1644 ena_dev->dma_addr_bits = width;
1649 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1653 u32 ctrl_ver_masked;
	/* Make sure the ENA version and the controller version are at least
	 * the versions the driver expects.
	 */
1658 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1659 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1660 ENA_REGS_CONTROLLER_VERSION_OFF);
1662 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1663 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1664 ena_trc_err("Reg read timeout occurred\n");
1665 return ENA_COM_TIMER_EXPIRED;
1668 ena_trc_info("ena device version: %d.%d\n",
1669 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1670 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1671 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1673 ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
1674 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
1675 >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1676 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
1677 >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1678 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1679 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1680 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1683 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1684 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1685 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1687 /* Validate the ctrl version without the implementation ID */
1688 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1689 ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1696 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1698 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1699 struct ena_com_admin_cq *cq = &admin_queue->cq;
1700 struct ena_com_admin_sq *sq = &admin_queue->sq;
1701 struct ena_com_aenq *aenq = &ena_dev->aenq;
1704 ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
1705 if (admin_queue->comp_ctx)
1706 ENA_MEM_FREE(ena_dev->dmadev,
1707 admin_queue->comp_ctx,
1708 (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
1709 admin_queue->comp_ctx = NULL;
1710 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1712 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
1713 sq->dma_addr, sq->mem_handle);
1716 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1718 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
1719 cq->dma_addr, cq->mem_handle);
1722 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1723 if (ena_dev->aenq.entries)
1724 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
1725 aenq->dma_addr, aenq->mem_handle);
1726 aenq->entries = NULL;
1727 ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
1730 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1735 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1737 ENA_REG_WRITE32(ena_dev->bus, mask_value,
1738 ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1739 ena_dev->admin_queue.polling = polling;
1742 bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
1744 return ena_dev->admin_queue.polling;
1747 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1750 ena_dev->admin_queue.auto_polling = polling;
1753 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1755 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1757 ENA_SPINLOCK_INIT(mmio_read->lock);
1758 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1759 sizeof(*mmio_read->read_resp),
1760 mmio_read->read_resp,
1761 mmio_read->read_resp_dma_addr,
1762 mmio_read->read_resp_mem_handle);
1763 if (unlikely(!mmio_read->read_resp))
1766 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1768 mmio_read->read_resp->req_id = 0x0;
1769 mmio_read->seq_num = 0x0;
1770 mmio_read->readless_supported = true;
1775 ENA_SPINLOCK_DESTROY(mmio_read->lock);
1776 return ENA_COM_NO_MEM;
1779 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1781 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1783 mmio_read->readless_supported = readless_supported;
1786 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1788 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1790 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1791 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1793 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1794 sizeof(*mmio_read->read_resp),
1795 mmio_read->read_resp,
1796 mmio_read->read_resp_dma_addr,
1797 mmio_read->read_resp_mem_handle);
1799 mmio_read->read_resp = NULL;
1800 ENA_SPINLOCK_DESTROY(mmio_read->lock);
1803 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1805 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1806 u32 addr_low, addr_high;
1808 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1809 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1811 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1812 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1815 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1816 struct ena_aenq_handlers *aenq_handlers)
1818 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1819 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1822 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1824 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1825 ena_trc_err("Reg read timeout occurred\n");
1826 return ENA_COM_TIMER_EXPIRED;
1829 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1830 ena_trc_err("Device isn't ready, abort com init\n");
1831 return ENA_COM_NO_DEVICE;
1834 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1836 admin_queue->bus = ena_dev->bus;
1837 admin_queue->q_dmadev = ena_dev->dmadev;
1838 admin_queue->polling = false;
1839 admin_queue->curr_cmd_id = 0;
1841 ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
1843 ENA_SPINLOCK_INIT(admin_queue->q_lock);
1845 ret = ena_com_init_comp_ctxt(admin_queue);
1849 ret = ena_com_admin_init_sq(admin_queue);
1853 ret = ena_com_admin_init_cq(admin_queue);
1857 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1858 ENA_REGS_AQ_DB_OFF);
1860 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1861 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1863 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1864 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1866 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1867 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1869 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1870 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1873 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1874 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1875 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1876 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1879 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1880 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1881 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1882 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1884 ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1885 ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1886 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1890 admin_queue->ena_dev = ena_dev;
1891 admin_queue->running_state = true;
1895 ena_com_admin_destroy(ena_dev);
1900 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1901 struct ena_com_create_io_ctx *ctx)
1903 struct ena_com_io_sq *io_sq;
1904 struct ena_com_io_cq *io_cq;
1907 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1908 ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
1909 ctx->qid, ENA_TOTAL_NUM_QUEUES);
1910 return ENA_COM_INVAL;
1913 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1914 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1916 memset(io_sq, 0x0, sizeof(*io_sq));
1917 memset(io_cq, 0x0, sizeof(*io_cq));
1920 io_cq->q_depth = ctx->queue_size;
1921 io_cq->direction = ctx->direction;
1922 io_cq->qid = ctx->qid;
1924 io_cq->msix_vector = ctx->msix_vector;
1926 io_sq->q_depth = ctx->queue_size;
1927 io_sq->direction = ctx->direction;
1928 io_sq->qid = ctx->qid;
1930 io_sq->mem_queue_type = ctx->mem_queue_type;
1932 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1933 /* header length is limited to 8 bits */
1934 io_sq->tx_max_header_size =
1935 ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
1937 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1940 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1944 ret = ena_com_create_io_cq(ena_dev, io_cq);
1948 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1955 ena_com_destroy_io_cq(ena_dev, io_cq);
1957 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1961 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1963 struct ena_com_io_sq *io_sq;
1964 struct ena_com_io_cq *io_cq;
1966 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1967 ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
1968 qid, ENA_TOTAL_NUM_QUEUES);
1972 io_sq = &ena_dev->io_sq_queues[qid];
1973 io_cq = &ena_dev->io_cq_queues[qid];
1975 ena_com_destroy_io_sq(ena_dev, io_sq);
1976 ena_com_destroy_io_cq(ena_dev, io_cq);
1978 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1981 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1982 struct ena_admin_get_feat_resp *resp)
1984 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1987 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1988 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1990 struct ena_admin_get_feat_resp get_resp;
1993 rc = ena_com_get_feature(ena_dev, &get_resp,
1994 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1998 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1999 sizeof(get_resp.u.dev_attr));
2000 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
2002 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2003 rc = ena_com_get_feature(ena_dev, &get_resp,
2004 ENA_ADMIN_MAX_QUEUES_EXT,
2005 ENA_FEATURE_MAX_QUEUE_EXT_VER);
2009 if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
2012 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
2013 sizeof(get_resp.u.max_queue_ext));
2014 ena_dev->tx_max_header_size =
2015 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
2017 rc = ena_com_get_feature(ena_dev, &get_resp,
2018 ENA_ADMIN_MAX_QUEUES_NUM, 0);
2019 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
2020 sizeof(get_resp.u.max_queue));
2021 ena_dev->tx_max_header_size =
2022 get_resp.u.max_queue.max_header_size;
2028 rc = ena_com_get_feature(ena_dev, &get_resp,
2029 ENA_ADMIN_AENQ_CONFIG, 0);
2033 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
2034 sizeof(get_resp.u.aenq));
2036 rc = ena_com_get_feature(ena_dev, &get_resp,
2037 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2041 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
2042 sizeof(get_resp.u.offload));
	/* Driver hints isn't a mandatory admin command, so if the
	 * command isn't supported, set driver hints to 0.
	 */
2047 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
2050 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
2051 sizeof(get_resp.u.hw_hints));
2052 else if (rc == ENA_COM_UNSUPPORTED)
2053 memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
2057 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
2059 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
2060 sizeof(get_resp.u.llq));
2061 else if (rc == ENA_COM_UNSUPPORTED)
2062 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2066 rc = ena_com_get_feature(ena_dev, &get_resp,
2067 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
2069 memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
2070 sizeof(get_resp.u.ind_table));
2071 else if (rc == ENA_COM_UNSUPPORTED)
2072 memset(&get_feat_ctx->ind_table, 0x0,
2073 sizeof(get_feat_ctx->ind_table));
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_com_aenq_intr_handler:
 * handles the incoming AENQ events;
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	u64 timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE8(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp = (u64)aenq_common->timestamp_low |
			((u64)aenq_common->timestamp_high << 32);
		ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n",
			    aenq_common->group,
			    aenq_common->syndrom,
			    timestamp);

		/* Handle specific event*/
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler*/

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update the aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
				dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

#ifdef ENA_EXTENDED_STATS
/*
 * Sets the function Idx and Queue Idx to be used for
 * the get full statistics feature
 */
int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
					  u32 func_queue)
{
	/* Function & Queue are acquired from the user in the following format:
	 * Bottom Half word: funct
	 * Top Half Word: queue
	 */
	ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
	ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);

	return 0;
}

#endif /* ENA_EXTENDED_STATS */

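/* Reset the device through the DEV_CTL register: verify the device is
 * ready, trigger the reset with the given reason, wait for the
 * reset-in-progress bit to assert and then clear, and derive the admin
 * command completion timeout from the capabilities register.
 */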
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err("Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err("Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of the timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

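/* Build a GET_STATS admin command of the requested type and execute it on
 * the admin queue.
 */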
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

#ifdef ENA_EXTENDED_STATS

int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
				   u32 len)
{
	struct ena_com_stats_ctx ctx;
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
	ena_mem_handle_t mem_handle;
	void *virt_addr;
	dma_addr_t phys_addr;
	int ret;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
			       virt_addr, phys_addr, mem_handle);
	if (!virt_addr) {
		ret = ENA_COM_NO_MEM;
		goto done;
	}
	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd->u.control_buffer.address,
				   phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		goto free_ext_stats_mem;
	}
	get_cmd->u.control_buffer.length = len;

	get_cmd->device_id = ena_dev->stats_func;
	get_cmd->queue_idx = ena_dev->stats_queue;

	ret = ena_get_dev_stats(ena_dev, &ctx,
				ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
	if (ret < 0)
		goto free_ext_stats_mem;

	ret = snprintf(buff, len, "%s", (char *)virt_addr);

free_ext_stats_mem:
	ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
			      mem_handle);
done:
	return ret;
}
#endif

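/* Notify the device of the driver's MTU using a SET_FEATURE(ENA_ADMIN_MTU)
 * admin command.
 */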
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (unlikely(ret)) {
		ena_trc_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

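/* Program the currently cached RSS hash function, initial value and key
 * into the device, after validating that the device supports the selected
 * function.
 */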
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_UNSUPPORTED;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		ena_trc_err("Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}

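/* Update the host copy of the RSS hash function and key (a Toeplitz key
 * must match the supported key size exactly) and push it to the device.
 * The previous hash function is restored on failure.
 */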
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key;
	struct ena_admin_get_feat_resp get_resp;
	enum ena_admin_hash_functions old_func;
	struct ena_rss *rss = &ena_dev->rss;
	int rc;

	hash_key = rss->hash_key;

	/* Make sure the key size is a multiple of DWORDs */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err("Flow hash function %d isn't supported\n", func);
		return ENA_COM_UNSUPPORTED;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key) {
			if (key_len != sizeof(hash_key->key)) {
				ena_trc_err("key len (%hu) doesn't equal the supported size (%zu)\n",
					    key_len, sizeof(hash_key->key));
				return ENA_COM_INVAL;
			}
			memcpy(hash_key->key, key, key_len);
			rss->hash_init_val = init_val;
			hash_key->keys_num = key_len / sizeof(hash_key->key[0]);
		}
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		ena_trc_err("Invalid hash function (%d)\n", func);
		return ENA_COM_INVAL;
	}

	old_func = rss->hash_func;
	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		rss->hash_func = old_func;

	return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	if (unlikely(!func))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	/* ENA_FFS() returns 1 in case the lsb is set */
	rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
	if (rss->hash_func)
		rss->hash_func--;

	*func = rss->hash_func;

	return 0;
}

int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		ena_dev->rss.hash_key;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl), 0);
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

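/* Select the default hashed fields per protocol (L3 + L4 for TCP/UDP,
 * L3 for other IP traffic, L2 for non-IP), verify the device supports the
 * selection and program it.
 */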
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_UNSUPPORTED;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err("Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

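/* Store a single RSS indirection table entry in the host shadow table
 * after bounds checking. ena_com_indirect_table_set() pushes the whole
 * table to the device.
 */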
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return ENA_COM_UNSUPPORTED;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		ena_trc_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

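/* Allocate the RSS indirection table, hash key and hash control
 * structures. Lack of hash key support is tolerated so that the device can
 * still work with an indirection table only.
 */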
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	/* The following function might return unsupported in case the
	 * device doesn't support setting the key / hash function. We can safely
	 * ignore this error and have indirection table support only.
	 */
	rc = ena_com_hash_key_allocate(ena_dev);
	if (likely(!rc))
		ena_com_hash_key_fill_default_key(ena_dev);
	else if (rc != ENA_COM_UNSUPPORTED)
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       SZ_4K,
			       host_attr->host_info,
			       host_attr->host_info_dma_addr,
			       host_attr->host_info_dma_handle);
	if (unlikely(!host_attr->host_info))
		return ENA_COM_NO_MEM;

	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		(ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       debug_area_size,
			       host_attr->debug_area_virt_addr,
			       host_attr->debug_area_dma_addr,
			       host_attr->debug_area_dma_handle);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return ENA_COM_NO_MEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      SZ_4K,
				      host_attr->host_info,
				      host_attr->host_info_dma_addr,
				      host_attr->host_info_dma_handle);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      host_attr->debug_area_size,
				      host_attr->debug_area_virt_addr,
				      host_attr->debug_area_dma_addr,
				      host_attr->debug_area_dma_handle);
		host_attr->debug_area_virt_addr = NULL;
	}
}

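/* Pass the host info page and debug area base addresses and size to the
 * device using a SET_FEATURE(ENA_ADMIN_HOST_ATTR_CONFIG) admin command.
 */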
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
							   u32 intr_delay_resolution,
							   u32 *intr_moder_interval)
{
	if (!intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_tx_interval);
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_rx_interval);
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);

	if (rc) {
		if (rc == ENA_COM_UNSUPPORTED) {
			ena_trc_dbg("Feature %d isn't supported\n",
				    ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
				    rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	/* if moderation is supported by the device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);

	/* Disable adaptive moderation by default - can be enabled later */
	ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_rx_interval;
}

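/* Choose between host memory and device memory (LLQ) TX placement based on
 * the device's LLQ capabilities and the supplied default configuration, and
 * derive tx_max_header_size for LLQ mode.
 */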
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int rc;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (unlikely(ena_dev->tx_max_header_size == 0)) {
		ena_trc_err("the size of the LLQ entry is smaller than needed\n");
		return ENA_COM_INVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;