/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if HAVE_CONFIG_H
#  include <config.h>
#endif /* HAVE_CONFIG_H */

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <string.h>
#include <netinet/in.h>

#include <infiniband/opcode.h>

#include "mlx4.h"
#include "doorbell.h"
enum {
	MLX4_CQ_DOORBELL			= 0x20
};

enum {
	CQ_OK					=  0,
	CQ_EMPTY				= -1,
	CQ_POLL_ERR				= -2
};

#define MLX4_CQ_DB_REQ_NOT_SOL			(1 << 24)
#define MLX4_CQ_DB_REQ_NOT			(2 << 24)
enum {
	MLX4_CQE_OWNER_MASK			= 0x80,
	MLX4_CQE_IS_SEND_MASK			= 0x40,
	MLX4_CQE_OPCODE_MASK			= 0x1f
};
enum {
	MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX4_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX4_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX4_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX4_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX4_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};
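/*
 * Host view of the 32-byte (MLX4_CQ_ENTRY_SIZE) hardware CQE.
 * Multi-byte fields are big-endian on the wire, hence the
 * ntohl()/ntohs() conversions wherever they are read.  mlx4_err_cqe
 * overlays the same 32 bytes for error completions.
 */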
struct mlx4_cqe {
	uint32_t	my_qpn;
	uint32_t	immed_rss_invalid;
	uint32_t	g_mlpath_rqpn;
	uint8_t		sl;
	uint8_t		reserved1;
	uint16_t	rlid;
	uint32_t	reserved2;
	uint32_t	byte_cnt;
	uint16_t	wqe_index;
	uint16_t	checksum;
	uint8_t		reserved3[3];
	uint8_t		owner_sr_opcode;
};
struct mlx4_err_cqe {
	uint32_t	my_qpn;
	uint32_t	reserved1[5];
	uint16_t	wqe_index;
	uint8_t		vendor_err;
	uint8_t		syndrome;
	uint8_t		reserved2[3];
	uint8_t		owner_sr_opcode;
};
static struct mlx4_cqe *get_cqe(struct mlx4_cq *cq, int entry)
{
	return cq->buf.buf + entry * MLX4_CQ_ENTRY_SIZE;
}
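/*
 * Return the CQE at index n if software owns it, else NULL.  A CQE is
 * software-owned when its ownership bit matches the parity of the
 * current sweep through the CQ buffer: bit (cqe + 1) of n flips on
 * every wrap-around of the consumer index.
 */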
static void *get_sw_cqe(struct mlx4_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibv_cq.cqe);

	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibv_cq.cqe + 1))) ? NULL : cqe;
}
static struct mlx4_cqe *next_cqe_sw(struct mlx4_cq *cq)
{
	return get_sw_cqe(cq, cq->cons_index);
}
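/*
 * Publish the consumer index to the hardware doorbell record so the
 * HCA knows which CQEs have been polled and may be overwritten.  Only
 * the low 24 bits are significant.
 */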
static void update_cons_index(struct mlx4_cq *cq)
{
	*cq->set_ci_db = htonl(cq->cons_index & 0xffffff);
}
static void mlx4_handle_error_cqe(struct mlx4_err_cqe *cqe, struct ibv_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR)
		printf(PFX "local QP operation err "
		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
		       "opcode = %02x)\n",
		       ntohl(cqe->my_qpn), ntohs(cqe->wqe_index),
		       cqe->vendor_err,
		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IBV_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IBV_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IBV_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IBV_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IBV_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IBV_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IBV_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IBV_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IBV_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IBV_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IBV_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IBV_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IBV_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IBV_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err;
}
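/*
 * Process one CQE: find the QP (or XRC/regular SRQ) it belongs to,
 * retire the matching work request, and translate the CQE into an
 * ibv_wc.  Returns CQ_OK, CQ_EMPTY, or CQ_POLL_ERR.
 */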
static int mlx4_poll_one(struct mlx4_cq *cq,
			 struct mlx4_qp **cur_qp,
			 struct ibv_wc *wc)
{
	struct mlx4_wq *wq;
	struct mlx4_cqe *cqe;
	struct mlx4_srq *srq = NULL;
	uint32_t qpn;
	uint32_t srqn;
	uint32_t g_mlpath_rqpn;
	uint16_t wqe_index;
	int is_error;
	int is_send;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return CQ_EMPTY;

	++cq->cons_index;

	VALGRIND_MAKE_MEM_DEFINED(cqe, sizeof *cqe);

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	qpn = ntohl(cqe->my_qpn);

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (qpn & MLX4_XRC_QPN_BIT && !is_send) {
		srqn = ntohl(cqe->g_mlpath_rqpn) & 0xffffff;
		/*
		 * We do not have to take the XRC SRQ table lock here,
		 * because CQs will be locked while XRC SRQs are removed
		 * from the table.
		 */
		srq = mlx4_find_xrc_srq(to_mctx(cq->ibv_cq.context), srqn);
		if (!srq)
			return CQ_POLL_ERR;
	} else if (!*cur_qp || (qpn & 0xffffff) != (*cur_qp)->ibv_qp.qp_num) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mlx4_find_qp(to_mctx(cq->ibv_cq.context),
				       qpn & 0xffffff);
		if (!*cur_qp)
			return CQ_POLL_ERR;
	}

	wc->qp_num = qpn & 0xffffff;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ntohs(cqe->wqe_index);
		wq->tail += (uint16_t) (wqe_index - (uint16_t) wq->tail);
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if (srq) {
		wqe_index = ntohs(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_index];
		mlx4_free_srq_wqe(srq, wqe_index);
	} else if ((*cur_qp)->ibv_qp.srq) {
		srq = to_msrq((*cur_qp)->ibv_qp.srq);
		wqe_index = ntohs(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_index];
		mlx4_free_srq_wqe(srq, wqe_index);
	} else {
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	if (is_error) {
		mlx4_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return CQ_OK;
	}

	wc->status = IBV_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IBV_WC_WITH_IMM;
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode    = IBV_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IBV_WC_WITH_IMM;
		case MLX4_OPCODE_SEND:
			wc->opcode    = IBV_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode    = IBV_WC_RDMA_READ;
			wc->byte_len  = ntohl(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode    = IBV_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode    = IBV_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode    = IBV_WC_BIND_MW;
			break;
		default:
			/* assume it's a send completion */
			wc->opcode    = IBV_WC_SEND;
			break;
		}
	} else {
		wc->byte_len = ntohl(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode   = IBV_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IBV_WC_WITH_IMM;
			wc->imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IBV_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode   = IBV_WC_RECV;
			wc->wc_flags = IBV_WC_WITH_IMM;
			wc->imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid	   = ntohs(cqe->rlid);
		wc->sl		   = cqe->sl >> 4;
		g_mlpath_rqpn	   = ntohl(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IBV_WC_GRH : 0;
		wc->pkey_index	   = ntohl(cqe->immed_rss_invalid) & 0x7f;
	}

	return CQ_OK;
}
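/*
 * ibv_poll_cq() entry point: drain up to ne CQEs under the CQ
 * spinlock.  The QP looked up for the previous CQE is carried across
 * iterations, since consecutive completions often belong to the same
 * QP, and the consumer index doorbell record is updated once at the
 * end rather than per CQE.
 */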
int mlx4_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
{
	struct mlx4_cq *cq = to_mcq(ibcq);
	struct mlx4_qp *qp = NULL;
	int npolled;
	int err = CQ_OK;

	pthread_spin_lock(&cq->lock);

	for (npolled = 0; npolled < ne; ++npolled) {
		err = mlx4_poll_one(cq, &qp, wc + npolled);
		if (err != CQ_OK)
			break;
	}

	if (npolled || err == CQ_POLL_ERR)
		update_cons_index(cq);

	pthread_spin_unlock(&cq->lock);

	return err == CQ_POLL_ERR ? err : npolled;
}
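/*
 * ibv_req_notify_cq() entry point: write the arm doorbell record,
 * then ring the CQ doorbell via MMIO.  The 2-bit sequence number
 * (arm_sn, advanced in mlx4_cq_event() on each completion event)
 * distinguishes a fresh arm request from one belonging to an earlier
 * event cycle.
 */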
int mlx4_arm_cq(struct ibv_cq *ibvcq, int solicited)
{
	struct mlx4_cq *cq = to_mcq(ibvcq);
	uint32_t doorbell[2];
	uint32_t sn;
	uint32_t ci;
	uint32_t cmd;

	sn  = cq->arm_sn & 3;
	ci  = cq->cons_index & 0xffffff;
	cmd = solicited ? MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT;

	*cq->arm_db = htonl(sn << 28 | cmd | ci);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = htonl(sn << 28 | cmd | cq->cqn);
	doorbell[1] = htonl(ci);

	mlx4_write64(doorbell, to_mctx(ibvcq->context), MLX4_CQ_DOORBELL);

	return 0;
}
void mlx4_cq_event(struct ibv_cq *cq)
{
	to_mcq(cq)->arm_sn++;
}
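/*
 * Remove all CQEs belonging to one QP (and optionally free its SRQ
 * WQEs) while that QP sits in RESET, compacting the remaining entries
 * toward the producer end.  Caller must hold the CQ lock;
 * mlx4_cq_clean() below is the locked wrapper.
 */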
void __mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
{
	struct mlx4_cqe *cqe, *dest;
	uint32_t prod_index;
	uint8_t owner_bit;
	int nfreed = 0;
	int is_xrc_srq = 0;

	if (srq && srq->ibv_srq.xrc_cq)
		is_xrc_srq = 1;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->cons_index + cq->ibv_cq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe);
		if (is_xrc_srq &&
		    (ntohl(cqe->g_mlpath_rqpn) & 0xffffff) == srq->srqn &&
		    !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK)) {
			mlx4_free_srq_wqe(srq, ntohs(cqe->wqe_index));
			++nfreed;
		} else if ((ntohl(cqe->my_qpn) & 0xffffff) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_free_srq_wqe(srq, ntohs(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe);
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		update_cons_index(cq);
	}
}
void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
{
	pthread_spin_lock(&cq->lock);
	__mlx4_cq_clean(cq, qpn, srq);
	pthread_spin_unlock(&cq->lock);
}
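/*
 * Count CQEs that HW has delivered but SW has not yet polled; the CQ
 * resize path uses this to check that outstanding entries will fit in
 * the new buffer.
 */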
int mlx4_get_outstanding_cqes(struct mlx4_cq *cq)
{
	uint32_t i;

	for (i = cq->cons_index; get_sw_cqe(cq, i); ++i)
		;

	return i - cq->cons_index;
}
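/*
 * Copy the not-yet-consumed CQEs from the old buffer into the resized
 * one, stopping at the special RESIZE CQE written by HW, and
 * recompute each entry's ownership bit for its position in the new
 * buffer.  Called with the CQ lock held during CQ resize.
 */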
void mlx4_cq_resize_copy_cqes(struct mlx4_cq *cq, void *buf, int old_cqe)
{
	struct mlx4_cqe *cqe;
	int i;

	i = cq->cons_index;
	cqe = get_cqe(cq, (i & old_cqe));

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->ibv_cq.cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		memcpy(buf + ((i + 1) & cq->ibv_cq.cqe) * MLX4_CQ_ENTRY_SIZE,
		       cqe, MLX4_CQ_ENTRY_SIZE);
		++i;
		cqe = get_cqe(cq, (i & old_cqe));
	}

	++cq->cons_index;
}
int mlx4_alloc_cq_buf(struct mlx4_device *dev, struct mlx4_buf *buf, int nent)
{
	if (mlx4_alloc_buf(buf, align(nent * MLX4_CQ_ENTRY_SIZE, dev->page_size),
			   dev->page_size))
		return -1;

	memset(buf->buf, 0, nent * MLX4_CQ_ENTRY_SIZE);

	return 0;
}