/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#if defined(__cplusplus)
extern "C" {
#endif
#include "ena_com.h"

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4

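/* e.g. with a CQ depth of 1024 (illustrative number), ena_com_update_dev_comp_head()
 * below writes the head doorbell only once more than 1024 / 4 == 256
 * completions have accumulated since the last update.
 */
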
struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

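/*
 * Example TX submission flow (illustrative sketch only, not part of this API;
 * tx_ctx, nb_hw_desc, rc and the "last packet" condition are hypothetical
 * caller state):
 *
 *	if (!ena_com_sq_have_enough_space(io_sq, tx_ctx.num_bufs + 1))
 *		return a queue-full error to the stack;
 *
 *	if (unlikely(ena_com_is_doorbell_needed(io_sq, &tx_ctx)))
 *		ena_com_write_sq_doorbell(io_sq);  // flush the current LLQ burst
 *
 *	rc = ena_com_prepare_tx(io_sq, &tx_ctx, &nb_hw_desc);
 *	if (unlikely(rc))
 *		return rc;
 *
 *	if (this was the last packet of the batch)
 *		ena_com_write_sq_doorbell(io_sq);
 */
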
/* Unmask the IO completion queue interrupt by writing the caller-prepared
 * interrupt control value to the queue's unmask register.
 */
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}

static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_desc(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. So to reduce
	 * the calculation overhead just subtract 2 lines from the free descs
	 * (one for the header line and one to compensate for the division
	 * rounding down).
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_desc(io_sq) > temp;
}

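/* Example with illustrative numbers: for an LLQ with descs_per_entry == 4,
 * required_buffers == 10 gives temp = 10 / 4 + 2 == 4, so the queue is
 * reported as having space only while more than 4 LLQ entries are free.
 */
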
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	int num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	/* An extra descriptor is needed when the TX metadata has changed */
	if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	ena_trc_dbg("queue: %d num_descs: %d num_entries_needed: %d\n",
		    io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 tail = io_sq->tail;
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;

	ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
		    io_sq->qid, tail);

	ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		ena_trc_dbg("reset available entries in tx burst for queue %d to %d\n",
			    io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	head = io_cq->head;
	unreported_comp = head - io_cq->last_head_update;
	need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

	if (io_cq->cq_head_db_reg && need_update) {
		ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
			    io_cq->qid, head);
		ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
		io_cq->last_head_update = head;
	}

	return 0;
}

static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

/* Acknowledge elem completed SQ descriptors so their slots can be reused */
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

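/* The expected phase flips each time the (power-of-2 sized) CQ wraps, so a
 * completion descriptor whose phase bit differs from io_cq->phase was written
 * on a previous lap of the ring and is not yet valid for the current one.
 */
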
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected, it means that the device hasn't updated
	 * this completion yet.
	 */
	cdesc_phase = READ_ONCE16(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	/* Read the rest of the descriptor only after its phase bit has been
	 * validated.
	 */
	dma_rmb();

	*req_id = READ_ONCE16(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		ena_trc_err("Invalid req id %d\n", cdesc->req_id);
		return ENA_COM_INVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}

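/*
 * Typical TX completion reaping loop (illustrative sketch only; budget,
 * num_descs and the per-req_id bookkeeping are hypothetical caller state):
 *
 *	while (budget--) {
 *		rc = ena_com_tx_comp_req_id_get(io_cq, &req_id);
 *		if (rc == ENA_COM_TRY_AGAIN)
 *			break;			// device has no more completions
 *		if (unlikely(rc))
 *			handle the error;	// e.g. ENA_COM_INVAL
 *		release the buffer recorded under req_id;
 *		ena_com_comp_ack(io_sq, num_descs);
 *	}
 *	ena_com_update_dev_comp_head(io_cq);
 */
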
#if defined(__cplusplus)
}
#endif
#endif /* ENA_ETH_COM_H_ */