/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#if defined(__cplusplus)
extern "C" {
#endif
#include "ena_com.h"

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4
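
/* For example, with a queue depth of 1024 and the threshold of 4 above, the
 * completion queue head doorbell in ena_com_update_dev_comp_head() below is
 * written only once more than 1024 / 4 = 256 completions have gone
 * unreported, bounding the doorbell MMIO writes to roughly four per full
 * traversal of the queue.
 */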

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};
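
/* Illustrative sketch (not part of this header): how a driver's TX path might
 * populate the context for one non-TSO, checksum-offloaded IPv4/TCP packet.
 * The buffer array, req_id allocation and header-length parsing are
 * hypothetical driver-side details.
 *
 *	struct ena_com_tx_ctx tx_ctx = {0};
 *
 *	tx_ctx.ena_bufs = tx_info->bufs;       // driver-owned DMA segment array
 *	tx_ctx.num_bufs = nb_segments;
 *	tx_ctx.req_id = req_id;                // echoed back on TX completion
 *	tx_ctx.header_len = hdr_len;           // pushed-buffer size for LLQ
 *	tx_ctx.l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
 *	tx_ctx.l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 *	tx_ctx.l3_csum_enable = 1;
 *	tx_ctx.l4_csum_enable = 1;
 *	tx_ctx.meta_valid = 1;
 */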

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
	u8 pkt_offset;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);
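
/* Illustrative sketch (not part of this header): a minimal TX submission flow
 * combining the helpers declared below. The "+ 1" space reservation for a
 * possible meta descriptor and the error handling are hypothetical
 * driver-side policy.
 *
 *	int nb_hw_desc = 0;
 *
 *	if (!ena_com_sq_have_enough_space(io_sq, tx_ctx.num_bufs + 1))
 *		return rc;                        // back-pressure the stack
 *
 *	// For LLQ, the current burst may have to be closed with a doorbell
 *	// before this packet can be written
 *	if (ena_com_is_doorbell_needed(io_sq, &tx_ctx))
 *		ena_com_write_sq_doorbell(io_sq);
 *
 *	rc = ena_com_prepare_tx(io_sq, &tx_ctx, &nb_hw_desc);
 *	if (unlikely(rc))
 *		return rc;
 *
 *	ena_com_write_sq_doorbell(io_sq);         // or defer to end of burst
 */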

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);
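
/* Illustrative sketch (not part of this header): a minimal RX poll loop. The
 * budget and the per-packet handling are hypothetical driver-side details;
 * ena_rx_ctx.descs == 0 indicates that no packet is ready.
 *
 *	struct ena_com_rx_ctx rx_ctx;
 *
 *	while (budget--) {
 *		rx_ctx.ena_bufs = rx_ring->ena_bufs;  // driver-owned array
 *		rx_ctx.max_bufs = rx_ring->sgl_size;  // hypothetical bound
 *		rx_ctx.descs = 0;
 *
 *		rc = ena_com_rx_pkt(io_cq, io_sq, &rx_ctx);
 *		if (unlikely(rc))
 *			break;                        // e.g. trigger a reset
 *		if (rx_ctx.descs == 0)
 *			break;                        // ring is empty
 *
 *		// hand the rx_ctx.descs completed buffers to the stack here
 *	}
 */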

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
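
/* Illustrative sketch (not part of this header): refilling the RX submission
 * queue. The buffer allocation, DMA mapping and req_id bookkeeping are
 * hypothetical driver-side details.
 *
 *	while (nb_to_refill-- && ena_com_sq_have_enough_space(io_sq, 1)) {
 *		struct ena_com_buf ena_buf = {
 *			.paddr = dma_addr,            // freshly mapped buffer
 *			.len = buf_len,
 *		};
 *
 *		rc = ena_com_add_single_rx_desc(io_sq, &ena_buf, req_id);
 *		if (unlikely(rc))
 *			break;
 *	}
 *
 *	ena_com_write_sq_doorbell(io_sq);     // publish the new descriptors
 */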

static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}
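
/* Illustrative sketch (not part of this header): re-arming the interrupt at
 * the end of a poll cycle. This assumes the ena_com_update_intr_reg() helper
 * declared in ena_com.h; the moderation intervals shown are hypothetical.
 *
 *	struct ena_eth_io_intr_reg intr_reg;
 *
 *	ena_com_update_intr_reg(&intr_reg,
 *				rx_interval,    // RX moderation delay
 *				tx_interval,    // TX moderation delay
 *				true);          // unmask the interrupt
 *	ena_com_unmask_intr(io_cq, &intr_reg);
 */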

static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce the
	 * calculation overhead, just subtract 2 lines from the free descs:
	 * one for the header line and one to compensate for the division
	 * rounding down.
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}
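
/* Worked example of the LLQ branch above: with descs_per_entry == 4 and
 * required_buffers == 10, temp = 10 / 4 + 2 = 4, so the queue is considered
 * to have room once more than 4 entries are free. The 2 extra entries absorb
 * both the header line and the division rounding down (10 / 4 == 2, although
 * 10 buffers actually span 3 entries).
 */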

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	int num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	/* Account for a meta descriptor when one will have to be written */
	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "Queue: %d num_descs: %d num_entries_needed: %d\n",
		    io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
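
/* Worked example of the entry count above: with descs_num_before_header == 2
 * and descs_per_entry == 4, a packet needing num_descs == 5 leaves
 * 5 - 2 = 3 descriptors after the first entry, so
 * num_entries_needed = 1 + DIV_ROUND_UP(3, 4) = 2. The doorbell must then be
 * rung first whenever fewer than 2 entries remain in the current TX burst.
 */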

static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "Write submission queue doorbell for queue: %d tail: %d\n",
		    io_sq->qid, tail);

	ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Reset available entries in tx burst for queue %d to %d\n",
			    io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	if (unlikely(io_cq->cq_head_db_reg)) {
		head = io_cq->head;
		unreported_comp = head - io_cq->last_head_update;
		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

		if (unlikely(need_update)) {
			ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
				    "Write completion queue doorbell for queue %d: head: %d\n",
				    io_cq->qid, head);
			ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
			io_cq->last_head_update = head;
		}
	}

	return 0;
}

static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}
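
/* For example, with q_depth == 256 the head counter runs freely and is
 * masked with 255; each time the masked head wraps back to 0, the expected
 * phase bit flips. That is how ena_com_tx_comp_req_id_get() below tells
 * freshly written completion descriptors apart from stale ones left over
 * from the previous lap around the ring.
 */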

static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected phase, it means that the device still didn't update
	 * this completion.
	 */
	cdesc_phase = READ_ONCE16(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	/* Make sure the rest of the descriptor is read only after the phase
	 * bit has been validated
	 */
	dma_rmb();

	*req_id = READ_ONCE16(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "Invalid req id %d\n", cdesc->req_id);
		return ENA_COM_INVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}
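
/* Illustrative sketch (not part of this header): reaping TX completions with
 * the helpers above. The req_id-to-buffer lookup and the per-packet
 * descriptor count are hypothetical driver-side bookkeeping recorded at
 * send time.
 *
 *	u16 req_id;
 *	int total_descs = 0;
 *
 *	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0) {
 *		// free the packet req_id maps to and accumulate how many SQ
 *		// descriptors it consumed
 *		total_descs += tx_info[req_id].tx_descs;  // hypothetical
 *	}
 *
 *	if (total_descs) {
 *		ena_com_comp_ack(io_sq, total_descs);     // frees SQ space
 *		ena_com_update_dev_comp_head(io_cq);      // rate-limited doorbell
 *	}
 */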

#if defined(__cplusplus)
}
#endif
#endif /* ENA_ETH_COM_H_ */