/**************************************************************************

Copyright (c) 2007, 2008 Chelsio Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#define T3_MAX_INLINE	64

#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
				      ((wptr)!=(rptr)) )
#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
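/*
 * Note on the pointer scheme above: rptr and wptr are free-running
 * counters, so queue occupancy is simply their difference.  For example,
 * with size_log2 == 4 (16 slots), rptr == 5 and wptr == 21 give
 * Q_COUNT == 16, Q_FREECNT == 0 and Q_FULL true, while Q_PTR2IDX(21, 4)
 * wraps back to slot 5 and Q_GENBIT flips each time a pointer crosses a
 * multiple of the queue size.
 */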
static inline void
ring_doorbell(void /* __iomem */ *doorbell, u32 qpid)
{
	writel(doorbell, ((1<<31) | qpid));
}

#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
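/*
 * SEQ32_GE() compares 32-bit sequence numbers that may wrap: x is treated
 * as >= y as long as x has not fallen more than 2^31 behind y (i.e. the
 * unsigned difference keeps the sign bit clear).
 */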
enum t3_wr_flags {
	T3_COMPLETION_FLAG = 0x01,
	T3_NOTIFY_FLAG = 0x02,
	T3_SOLICITED_EVENT_FLAG = 0x04,
	T3_READ_FENCE_FLAG = 0x08,
	T3_LOCAL_FENCE_FLAG = 0x10
} __attribute__ ((packed));
enum t3_wr_opcode {
	T3_WR_BP = FW_WROPCODE_RI_BYPASS,
	T3_WR_SEND = FW_WROPCODE_RI_SEND,
	T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
	T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
	T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
	T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
	T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
	T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
	T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP
} __attribute__ ((packed));
enum t3_rdma_opcode {
	T3_RDMA_WRITE,		/* IETF RDMAP v1.0 ... */
	T3_READ_REQ,
	T3_READ_RESP,
	T3_SEND,
	T3_SEND_WITH_INV,
	T3_SEND_WITH_SE,
	T3_SEND_WITH_SE_INV,
	T3_TERMINATE,
	T3_RDMA_INIT,		/* CHELSIO RI specific ... */
	T3_BIND_MW,
	T3_FAST_REGISTER,
	T3_LOCAL_INV,
	T3_QP_MOD,
	T3_BYPASS
} __attribute__ ((packed));
static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
{
	switch (wrop) {
		case T3_WR_BP: return T3_BYPASS;
		case T3_WR_SEND: return T3_SEND;
		case T3_WR_WRITE: return T3_RDMA_WRITE;
		case T3_WR_READ: return T3_READ_REQ;
		case T3_WR_INV_STAG: return T3_LOCAL_INV;
		case T3_WR_BIND: return T3_BIND_MW;
		case T3_WR_INIT: return T3_RDMA_INIT;
		case T3_WR_QP_MOD: return T3_QP_MOD;
		default: break;
	}
	return -1;
}
/* Work request id */

#define WRID(wrid)	(wrid.id1)
#define WRID_GEN(wrid)	(wrid.id0.wr_gen)
#define WRID_IDX(wrid)	(wrid.id0.wr_idx)
#define WRID_LO(wrid)	(wrid.id0.wr_lo)
struct fw_riwrh {
	__be32 op_seop_flags;
	__be32 gen_tid_len;
};
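/*
 * Field-accessor naming convention used below (and for the TPT and CQE
 * fields later in this file): S_FOO is the bit offset of field FOO, M_FOO
 * is the field mask, V_FOO(x) shifts a value into position and G_FOO(x)
 * extracts the field from a packed word.
 */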
#define S_FW_RIWR_OP		24
#define M_FW_RIWR_OP		0xff
#define V_FW_RIWR_OP(x)		((x) << S_FW_RIWR_OP)
#define G_FW_RIWR_OP(x)		((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)

#define S_FW_RIWR_SOPEOP	22
#define M_FW_RIWR_SOPEOP	0x3
#define V_FW_RIWR_SOPEOP(x)	((x) << S_FW_RIWR_SOPEOP)

#define S_FW_RIWR_FLAGS		8
#define M_FW_RIWR_FLAGS		0x3fffff
#define V_FW_RIWR_FLAGS(x)	((x) << S_FW_RIWR_FLAGS)
#define G_FW_RIWR_FLAGS(x)	((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)

#define S_FW_RIWR_TID		8
#define V_FW_RIWR_TID(x)	((x) << S_FW_RIWR_TID)

#define S_FW_RIWR_LEN		0
#define V_FW_RIWR_LEN(x)	((x) << S_FW_RIWR_LEN)

#define S_FW_RIWR_GEN		31
#define V_FW_RIWR_GEN(x)	((x) << S_FW_RIWR_GEN)
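/*
 * Per the shift/mask macros above, op_seop_flags carries the opcode in
 * bits 31:24, the SOP/EOP bits in 23:22 and the WR flags starting at bit
 * 8, while gen_tid_len carries the generation bit in bit 31, the TID
 * starting at bit 8 and the length starting at bit 0.
 */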
/* If num_sgle is zero, flit 5+ contains immediate data.*/
struct t3_send_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	struct t3_sge sgl[T3_MAX_SGE];	/* 4+ */
struct t3_local_inv_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */

struct t3_rdma_write_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be64 to_sink;		/* 3 */
	struct t3_sge sgl[T3_MAX_SGE];	/* 5+ */

struct t3_rdma_read_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be64 rem_to;		/* 3 */
	__be32 local_stag;	/* 4 */
	__be64 local_to;	/* 5 */
enum t3_addr_type {
	T3_VA_BASED_TO = 0x0,
	T3_ZERO_BASED_TO = 0x1
} __attribute__ ((packed));
enum t3_mem_perms {
	T3_MEM_ACCESS_LOCAL_READ = 0x1,
	T3_MEM_ACCESS_LOCAL_WRITE = 0x2,
	T3_MEM_ACCESS_REM_READ = 0x4,
	T3_MEM_ACCESS_REM_WRITE = 0x8
} __attribute__ ((packed));
struct t3_bind_mw_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	u16 reserved;		/* 2 */
	__be32 mw_stag;		/* 3 */
	__be64 mw_va;		/* 4 */
	__be32 mr_pbl_addr;	/* 5 */

struct t3_receive_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	u8 pagesz[T3_MAX_SGE];
	__be32 num_sgle;	/* 2 */
	struct t3_sge sgl[T3_MAX_SGE];	/* 3+ */
	__be32 pbl_addr[T3_MAX_SGE];

struct t3_bypass_wr {
	union t3_wrid wrid;	/* 1 */

struct t3_modify_qp_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be32 flags;		/* 2 */
	__be32 quiesce;		/* 2 */
	__be32 max_ird;		/* 3 */
	__be32 max_ord;		/* 3 */
	__be64 sge_cmd;		/* 4 */
enum t3_modify_qp_flags {
	MODQP_QUIESCE = 0x01,
	MODQP_MAX_IRD = 0x02,
	MODQP_MAX_ORD = 0x04,
	MODQP_WRITE_EC = 0x08,
	MODQP_READ_EC = 0x10,
};
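/*
 * MPA-level attributes carried in the RDMA init attributes below; the
 * marker, CRC and IETF bits mirror what was negotiated during MPA
 * connection setup.
 */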
enum t3_mpa_attrs {
	uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
	uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
	uP_RI_MPA_CRC_ENABLE = 0x4,
	uP_RI_MPA_IETF_ENABLE = 0x8
} __attribute__ ((packed));
enum t3_qp_caps {
	uP_RI_QP_RDMA_READ_ENABLE = 0x01,
	uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
	uP_RI_QP_BIND_ENABLE = 0x04,
	uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
	uP_RI_QP_STAG0_ENABLE = 0x10
} __attribute__ ((packed));
struct t3_rdma_init_attr {
	enum t3_mpa_attrs mpaattrs;
	enum t3_qp_caps qpcaps;

struct t3_rdma_init_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be32 scqid;		/* 3 */
	__be32 rq_addr;		/* 4 */
	__be32 flags;		/* bits 31-1 - reserved */
				/* bit 0 - set if RECV posted */
	__be64 qp_dma_addr;	/* 7 */
	__be32 qp_dma_size;	/* 8 */
enum rdma_init_wr_flags {

union t3_wr {
	struct t3_send_wr send;
	struct t3_rdma_write_wr write;
	struct t3_rdma_read_wr read;
	struct t3_receive_wr recv;
	struct t3_local_inv_wr local_inv;
	struct t3_bind_mw_wr bind;
	struct t3_bypass_wr bypass;
	struct t3_rdma_init_wr init;
	struct t3_modify_qp_wr qp_mod;
	struct t3_genbit genbit;
	__be64 flit[16];
};
#define T3_SQ_CQE_FLIT	  13
#define T3_SQ_COOKIE_FLIT 14

#define T3_RQ_COOKIE_FLIT 13
#define T3_RQ_CQE_FLIT	  14
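/*
 * These are flit (64-bit word) indices within a 16-flit WQE; the names
 * suggest they locate the CQE image and the wrid cookie for SQ and RQ
 * entries respectively, with the two offsets swapped between queue types.
 */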
static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
{
	return G_FW_RIWR_OP(be32toh(wqe->op_seop_flags));
}

static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
				  enum t3_wr_flags flags, u8 genbit, u32 tid,
				  u8 len)
{
	wqe->op_seop_flags = htobe32(V_FW_RIWR_OP(op) |
				     V_FW_RIWR_SOPEOP(M_FW_RIWR_SOPEOP) |
				     V_FW_RIWR_FLAGS(flags));
	wmb();
	wqe->gen_tid_len = htobe32(V_FW_RIWR_GEN(genbit) |
				   V_FW_RIWR_TID(tid) |
				   V_FW_RIWR_LEN(len));
	/* 2nd gen bit... */
	((union t3_wr *)wqe)->genbit.genbit = htobe64(genbit);
}
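/*
 * Illustrative posting flow (a sketch, not code from this file): the
 * caller picks the WQE at Q_PTR2IDX(wq->wptr, wq->size_log2), fills in
 * the opcode-specific body, stamps the header with build_fw_riwrh() using
 * Q_GENBIT(wq->wptr, wq->size_log2) as the generation bit, advances wptr
 * and finally calls ring_doorbell(wq->doorbell, qpid) so the adapter
 * fetches the new work request.
 */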
/*
 * T3 ULP2_TX commands
 */
/* T3 MC7 RDMA TPT entry format */

enum tpt_mem_type {
	TPT_NON_SHARED_MR = 0x0,
	TPT_SHARED_MR = 0x1,
	TPT_MW = 0x2,
	TPT_MW_RELAXED_PROTECTION = 0x3
};

enum tpt_mem_perm {
	TPT_LOCAL_READ = 0x8,
	TPT_LOCAL_WRITE = 0x4,
	TPT_REMOTE_READ = 0x2,
	TPT_REMOTE_WRITE = 0x1
};

struct tpt_entry {
	__be32 valid_stag_pdid;
	__be32 flags_pagesize_qpid;

	__be32 rsvd_pbl_addr;
	__be32 len;
	__be32 va_hi;
	__be32 va_low_or_fbo;

	__be32 rsvd_bind_cnt_or_pstag;
	__be32 rsvd_pbl_size;
};
#define S_TPT_VALID		31
#define V_TPT_VALID(x)		((x) << S_TPT_VALID)
#define F_TPT_VALID		V_TPT_VALID(1U)

#define S_TPT_STAG_KEY		23
#define M_TPT_STAG_KEY		0xFF
#define V_TPT_STAG_KEY(x)	((x) << S_TPT_STAG_KEY)
#define G_TPT_STAG_KEY(x)	(((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)

#define S_TPT_STAG_STATE	22
#define V_TPT_STAG_STATE(x)	((x) << S_TPT_STAG_STATE)
#define F_TPT_STAG_STATE	V_TPT_STAG_STATE(1U)

#define S_TPT_STAG_TYPE		20
#define M_TPT_STAG_TYPE		0x3
#define V_TPT_STAG_TYPE(x)	((x) << S_TPT_STAG_TYPE)
#define G_TPT_STAG_TYPE(x)	(((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
#define S_TPT_PDID		0
#define M_TPT_PDID		0xFFFFF
#define V_TPT_PDID(x)		((x) << S_TPT_PDID)
#define G_TPT_PDID(x)		(((x) >> S_TPT_PDID) & M_TPT_PDID)
#define S_TPT_PERM		28
#define M_TPT_PERM		0xF
#define V_TPT_PERM(x)		((x) << S_TPT_PERM)
#define G_TPT_PERM(x)		(((x) >> S_TPT_PERM) & M_TPT_PERM)

#define S_TPT_REM_INV_DIS	27
#define V_TPT_REM_INV_DIS(x)	((x) << S_TPT_REM_INV_DIS)
#define F_TPT_REM_INV_DIS	V_TPT_REM_INV_DIS(1U)

#define S_TPT_ADDR_TYPE		26
#define V_TPT_ADDR_TYPE(x)	((x) << S_TPT_ADDR_TYPE)
#define F_TPT_ADDR_TYPE		V_TPT_ADDR_TYPE(1U)

#define S_TPT_MW_BIND_ENABLE	25
#define V_TPT_MW_BIND_ENABLE(x)	((x) << S_TPT_MW_BIND_ENABLE)
#define F_TPT_MW_BIND_ENABLE	V_TPT_MW_BIND_ENABLE(1U)

#define S_TPT_PAGE_SIZE		20
#define M_TPT_PAGE_SIZE		0x1F
#define V_TPT_PAGE_SIZE(x)	((x) << S_TPT_PAGE_SIZE)
#define G_TPT_PAGE_SIZE(x)	(((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)

#define S_TPT_PBL_ADDR		0
#define M_TPT_PBL_ADDR		0x1FFFFFFF
#define V_TPT_PBL_ADDR(x)	((x) << S_TPT_PBL_ADDR)
#define G_TPT_PBL_ADDR(x)	(((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
#define S_TPT_QPID		0
#define M_TPT_QPID		0xFFFFF
#define V_TPT_QPID(x)		((x) << S_TPT_QPID)
#define G_TPT_QPID(x)		(((x) >> S_TPT_QPID) & M_TPT_QPID)
#define S_TPT_PSTAG		0
#define M_TPT_PSTAG		0xFFFFFF
#define V_TPT_PSTAG(x)		((x) << S_TPT_PSTAG)
#define G_TPT_PSTAG(x)		(((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)

#define S_TPT_PBL_SIZE		0
#define M_TPT_PBL_SIZE		0xFFFFF
#define V_TPT_PBL_SIZE(x)	((x) << S_TPT_PBL_SIZE)
#define G_TPT_PBL_SIZE(x)	(((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
#define S_CQE_OOO	31
#define M_CQE_OOO	0x1
#define G_CQE_OOO(x)	((((x) >> S_CQE_OOO)) & M_CQE_OOO)
#define V_CEQ_OOO(x)	((x)<<S_CQE_OOO)
#define S_CQE_QPID	12
#define M_CQE_QPID	0x7FFFF
#define G_CQE_QPID(x)	((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x)	((x)<<S_CQE_QPID)

#define S_CQE_SWCQE	11
#define M_CQE_SWCQE	0x1
#define G_CQE_SWCQE(x)	((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x)	((x)<<S_CQE_SWCQE)

#define S_CQE_GENBIT	10
#define M_CQE_GENBIT	0x1
#define G_CQE_GENBIT(x)	(((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x)	((x)<<S_CQE_GENBIT)

#define S_CQE_STATUS	5
#define M_CQE_STATUS	0x1F
#define G_CQE_STATUS(x)	((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x)	((x)<<S_CQE_STATUS)
#define S_CQE_TYPE	4
#define M_CQE_TYPE	0x1
#define G_CQE_TYPE(x)	((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x)	((x)<<S_CQE_TYPE)
#define S_CQE_OPCODE	0
#define M_CQE_OPCODE	0xF
#define G_CQE_OPCODE(x)	((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x)	((x)<<S_CQE_OPCODE)
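/*
 * Putting the shifts and masks above together, the CQE header word is
 * laid out as opcode[3:0], type[4] (set for SQ completions, per SQ_TYPE /
 * RQ_TYPE below), status[9:5], genbit[10], swcqe[11], qpid[30:12] and
 * ooo[31].
 */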
#define SW_CQE(x)		(G_CQE_SWCQE(be32toh((x).header)))
#define CQE_OOO(x)		(G_CQE_OOO(be32toh((x).header)))
#define CQE_QPID(x)		(G_CQE_QPID(be32toh((x).header)))
#define CQE_GENBIT(x)		(G_CQE_GENBIT(be32toh((x).header)))
#define CQE_TYPE(x)		(G_CQE_TYPE(be32toh((x).header)))
#define SQ_TYPE(x)		(CQE_TYPE((x)))
#define RQ_TYPE(x)		(!CQE_TYPE((x)))
#define CQE_STATUS(x)		(G_CQE_STATUS(be32toh((x).header)))
#define CQE_OPCODE(x)		(G_CQE_OPCODE(be32toh((x).header)))

#define CQE_LEN(x)		(be32toh((x).len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x)	(be32toh((x).u.rcqe.stag))
#define CQE_WRID_MSN(x)		(be32toh((x).u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_WPTR(x)	((x).u.scqe.wrid_hi)
#define CQE_WRID_WPTR(x)	((x).u.scqe.wrid_low)

/* generic accessor macros */
#define CQE_WRID_HI(x)		((x).u.scqe.wrid_hi)
#define CQE_WRID_LOW(x)		((x).u.scqe.wrid_low)
#define TPT_ERR_SUCCESS			0x0
#define TPT_ERR_STAG			0x1	/* STAG invalid: either the */
						/* STAG is off limit, being 0, */
						/* or STAG_key mismatch */
#define TPT_ERR_PDID			0x2	/* PDID mismatch */
#define TPT_ERR_QPID			0x3	/* QPID mismatch */
#define TPT_ERR_ACCESS			0x4	/* Invalid access right */
#define TPT_ERR_WRAP			0x5	/* Wrap error */
#define TPT_ERR_BOUND			0x6	/* base and bounds violation */
#define TPT_ERR_INVALIDATE_SHARED_MR	0x7	/* attempt to invalidate a */
						/* shared memory region */
#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8	/* attempt to invalidate an */
						/* MR that has a MW bound to it */
#define TPT_ERR_ECC			0x9	/* ECC error detected */
#define TPT_ERR_ECC_PSTAG		0xA	/* ECC error detected when */
						/* reading PSTAG for a MW */
#define TPT_ERR_PBL_ADDR_BOUND		0xB	/* pbl addr out of bounds: */
						/* software error */
#define TPT_ERR_SWFLUSH			0xC	/* SW FLUSHED */
#define TPT_ERR_CRC			0x10	/* CRC error */
#define TPT_ERR_MARKER			0x11	/* Marker error */
#define TPT_ERR_PDU_LEN_ERR		0x12	/* invalid PDU length */
#define TPT_ERR_OUT_OF_RQE		0x13	/* out of RQE */
#define TPT_ERR_DDP_VERSION		0x14	/* wrong DDP version */
#define TPT_ERR_RDMA_VERSION		0x15	/* wrong RDMA version */
#define TPT_ERR_OPCODE			0x16	/* invalid rdma opcode */
#define TPT_ERR_DDP_QUEUE_NUM		0x17	/* invalid ddp queue number */
#define TPT_ERR_MSN			0x18	/* MSN error */
#define TPT_ERR_TBIT			0x19	/* tag bit not set correctly */
#define TPT_ERR_MO			0x1A	/* MO not 0 for TERMINATE */

#define TPT_ERR_MSN_GAP			0x1B
#define TPT_ERR_MSN_RANGE		0x1C
#define TPT_ERR_IRD_OVERFLOW		0x1D
#define TPT_ERR_RQE_ADDR_BOUND		0x1E	/* RQE addr out of bounds: */
						/* software error */
#define TPT_ERR_INTERNAL_ERR		0x1F	/* internal error (opcode */
						/* mismatch) */
/*
 * A T3 WQ implements both the SQ and RQ.
 */
struct t3_wq {
	union t3_wr *queue;		/* DMA accessible memory */
	bus_addr_t dma_addr;		/* DMA address for HW */
	DECLARE_PCI_UNMAP_ADDR(mapping)	/* unmap kruft */
	u32 error;			/* 1 once we go to ERROR */
	u32 wptr;			/* idx to next available WR slot */
	u32 size_log2;			/* total wq size */
	struct t3_swsq *sq;		/* SW SQ */
	struct t3_swsq *oldest_read;	/* tracks oldest pending read */
	u32 sq_wptr;			/* sq_wptr - sq_rptr == count of */
	u32 sq_rptr;			/* pending wrs */
	u32 sq_size_log2;		/* sq size */
	u64 *rq;			/* SW RQ (holds consumer wr_ids) */
	u32 rq_wptr;			/* rq_wptr - rq_rptr == count of */
	u32 rq_rptr;			/* pending wrs */
	u64 *rq_oldest_wr;		/* oldest wr on the SW RQ */
	u32 rq_size_log2;		/* rq size */
	u32 rq_addr;			/* rq adapter address */
	void /* __iomem */ *doorbell;	/* kernel db */
	u64 udb;			/* user db if any */
};
struct t3_cq {
	DECLARE_PCI_UNMAP_ADDR(mapping)
	struct t3_cqe *queue;
	struct t3_cqe *sw_queue;
#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
					 CQE_GENBIT(*cqe))
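/*
 * A CQE slot is valid when the generation bit the hardware wrote into it
 * matches the generation the consumer expects for its current rptr; the
 * expected value flips each time rptr wraps around the queue.
 */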
static inline void cxio_set_wq_in_error(struct t3_wq *wq)
{
	wq->queue->flit[13] = 1;
}
static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
{
	struct t3_cqe *cqe;

	cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
	if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
		return cqe;
	return NULL;
}
static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
{
	struct t3_cqe *cqe;

	if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
		return cqe;
	}
	return NULL;
}
static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
{
	struct t3_cqe *cqe;

	if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
		return cqe;
	}
	cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
	if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))