1 /**************************************************************************
3 Copyright (c) 2007, 2008 Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
30 ***************************************************************************/
/* Driver-wide limits for the T3 RDMA work-request path. */
34 #define T3_MAX_INLINE 64
/* PBL backing the reserved STAG0: 2 * T3_MAX_SGE entries of 8 bytes each.
 * NOTE(review): T3_MAX_SGE is defined elsewhere in this header (not visible
 * in this chunk) — confirm its value there. */
35 #define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
/* Largest page-list-entry length reachable through STAG0: 128 MB. */
36 #define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
/* STAG0 page size is 2^15 = 32 KB. */
37 #define T3_STAG0_PAGE_SHIFT 15
/* Queues use free-running read/write counters over a power-of-2 ring, so
 * the ring is empty exactly when the two counters are equal. */
39 #define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
40 #define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
/*
 * Ring-index helpers.  rptr/wptr are free-running counters; the ring holds
 * 1 << size_log2 entries.  Every macro parameter is parenthesized so the
 * macros expand correctly when handed expressions (e.g. "p + 1").
 */
/* Generation bit of a free-running pointer: flips on each wrap of the ring. */
#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>(size_log2))&0x1))
/* Number of free slots remaining in the ring. */
#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<(size_log2))-((wptr)-(rptr)))
/* Number of occupied slots in the ring. */
#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
/* Mask a free-running pointer down to an array index. */
#define Q_PTR2IDX(ptr,size_log2) ((ptr) & ((1UL<<(size_log2))-1))
48 ring_doorbell(void /* __iomem */ *doorbell, u32 qpid)
50 writel(doorbell, ((1<<31) | qpid));
/* Serial-number compare modulo 2^32: true iff x >= y, i.e. the unsigned
 * difference (x - y) is less than 2^31 (sign bit of the difference clear). */
53 #define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
56 T3_COMPLETION_FLAG = 0x01,
57 T3_NOTIFY_FLAG = 0x02,
58 T3_SOLICITED_EVENT_FLAG = 0x04,
59 T3_READ_FENCE_FLAG = 0x08,
60 T3_LOCAL_FENCE_FLAG = 0x10
61 } __attribute__ ((packed));
64 T3_WR_BP = FW_WROPCODE_RI_BYPASS,
65 T3_WR_SEND = FW_WROPCODE_RI_SEND,
66 T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
67 T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
68 T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
69 T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
70 T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
71 T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
72 T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP
73 } __attribute__ ((packed));
76 T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
84 T3_RDMA_INIT, /* CHELSIO RI specific ... */
90 } __attribute__ ((packed));
92 static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
95 case T3_WR_BP: return T3_BYPASS;
96 case T3_WR_SEND: return T3_SEND;
97 case T3_WR_WRITE: return T3_RDMA_WRITE;
98 case T3_WR_READ: return T3_READ_REQ;
99 case T3_WR_INV_STAG: return T3_LOCAL_INV;
100 case T3_WR_BIND: return T3_BIND_MW;
101 case T3_WR_INIT: return T3_RDMA_INIT;
102 case T3_WR_QP_MOD: return T3_QP_MOD;
109 /* Work request id */
/*
 * Accessors into a union t3_wrid work-request id: id1 is the whole 64-bit
 * cookie, id0 splits it into generation / index / low fields.  The argument
 * is parenthesized so the macros also work on expressions such as "*p".
 */
#define WRID(wrid) ((wrid).id1)
#define WRID_GEN(wrid) ((wrid).id0.wr_gen)
#define WRID_IDX(wrid) ((wrid).id0.wr_idx)
#define WRID_LO(wrid) ((wrid).id0.wr_lo)
124 __be32 op_seop_flags;
/*
 * Field accessors for the fw_riwrh work-request header words.
 * Convention: S_* = bit offset, M_* = field mask (post-shift),
 * V_* = place a value into the field, G_* = extract the field from a word.
 */
128 #define S_FW_RIWR_OP 24
129 #define M_FW_RIWR_OP 0xff
130 #define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)
131 #define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
133 #define S_FW_RIWR_SOPEOP 22
134 #define M_FW_RIWR_SOPEOP 0x3
135 #define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)
137 #define S_FW_RIWR_FLAGS 8
138 #define M_FW_RIWR_FLAGS 0x3fffff
139 #define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)
140 #define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
/* NOTE(review): TID and FLAGS share shift 8 — they live in different header
 * words (op_seop_flags vs. gen_tid_len); confirm against the struct layout. */
142 #define S_FW_RIWR_TID 8
143 #define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)
145 #define S_FW_RIWR_LEN 0
146 #define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)
#define S_FW_RIWR_GEN 31
/*
 * The generation bit lives in bit 31.  Cast to u32 before shifting: callers
 * pass a u8 genbit which promotes to (signed) int, and left-shifting a
 * signed 1 by 31 is undefined behavior in C.
 */
#define V_FW_RIWR_GEN(x) (((u32)(x)) << S_FW_RIWR_GEN)
157 /* If num_sgle is zero, flit 5+ contains immediate data.*/
159 struct fw_riwrh wrh; /* 0 */
160 union t3_wrid wrid; /* 1 */
167 struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
170 struct t3_local_inv_wr {
171 struct fw_riwrh wrh; /* 0 */
172 union t3_wrid wrid; /* 1 */
177 struct t3_rdma_write_wr {
178 struct fw_riwrh wrh; /* 0 */
179 union t3_wrid wrid; /* 1 */
183 __be64 to_sink; /* 3 */
186 struct t3_sge sgl[T3_MAX_SGE]; /* 5+ */
189 struct t3_rdma_read_wr {
190 struct fw_riwrh wrh; /* 0 */
191 union t3_wrid wrid; /* 1 */
195 __be64 rem_to; /* 3 */
196 __be32 local_stag; /* 4 */
198 __be64 local_to; /* 5 */
202 T3_VA_BASED_TO = 0x0,
203 T3_ZERO_BASED_TO = 0x1
204 } __attribute__ ((packed));
207 T3_MEM_ACCESS_LOCAL_READ = 0x1,
208 T3_MEM_ACCESS_LOCAL_WRITE = 0x2,
209 T3_MEM_ACCESS_REM_READ = 0x4,
210 T3_MEM_ACCESS_REM_WRITE = 0x8
211 } __attribute__ ((packed));
213 struct t3_bind_mw_wr {
214 struct fw_riwrh wrh; /* 0 */
215 union t3_wrid wrid; /* 1 */
216 u16 reserved; /* 2 */
220 __be32 mw_stag; /* 3 */
222 __be64 mw_va; /* 4 */
223 __be32 mr_pbl_addr; /* 5 */
228 struct t3_receive_wr {
229 struct fw_riwrh wrh; /* 0 */
230 union t3_wrid wrid; /* 1 */
231 u8 pagesz[T3_MAX_SGE];
232 __be32 num_sgle; /* 2 */
233 struct t3_sge sgl[T3_MAX_SGE]; /* 3+ */
234 __be32 pbl_addr[T3_MAX_SGE];
237 struct t3_bypass_wr {
239 union t3_wrid wrid; /* 1 */
242 struct t3_modify_qp_wr {
243 struct fw_riwrh wrh; /* 0 */
244 union t3_wrid wrid; /* 1 */
245 __be32 flags; /* 2 */
246 __be32 quiesce; /* 2 */
247 __be32 max_ird; /* 3 */
248 __be32 max_ord; /* 3 */
249 __be64 sge_cmd; /* 4 */
254 enum t3_modify_qp_flags {
255 MODQP_QUIESCE = 0x01,
256 MODQP_MAX_IRD = 0x02,
257 MODQP_MAX_ORD = 0x04,
258 MODQP_WRITE_EC = 0x08,
259 MODQP_READ_EC = 0x10,
264 uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
265 uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
266 uP_RI_MPA_CRC_ENABLE = 0x4,
267 uP_RI_MPA_IETF_ENABLE = 0x8
268 } __attribute__ ((packed));
271 uP_RI_QP_RDMA_READ_ENABLE = 0x01,
272 uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
273 uP_RI_QP_BIND_ENABLE = 0x04,
274 uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
275 uP_RI_QP_STAG0_ENABLE = 0x10
276 } __attribute__ ((packed));
278 enum rdma_init_rtr_types {
/* RTR-type field helpers.  NOTE(review): S_RTR_TYPE is defined earlier in
 * this header (not visible in this chunk) — confirm the shift there. */
285 #define M_RTR_TYPE 0x3
286 #define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
287 #define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
/* Channel field helpers.  NOTE(review): S_CHAN and M_CHAN are likewise
 * defined outside this chunk. */
291 #define V_CHAN(x) ((x) << S_CHAN)
292 #define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
294 struct t3_rdma_init_attr {
302 enum t3_mpa_attrs mpaattrs;
303 enum t3_qp_caps qpcaps;
309 enum rdma_init_rtr_types rtr_type;
316 struct t3_rdma_init_wr {
317 struct fw_riwrh wrh; /* 0 */
318 union t3_wrid wrid; /* 1 */
321 __be32 scqid; /* 3 */
323 __be32 rq_addr; /* 4 */
328 __be16 flags_rtr_type;
332 __be64 qp_dma_addr; /* 7 */
333 __be32 qp_dma_size; /* 8 */
342 enum rdma_init_wr_flags {
343 MPA_INITIATOR = (1<<0),
348 struct t3_send_wr send;
349 struct t3_rdma_write_wr write;
350 struct t3_rdma_read_wr read;
351 struct t3_receive_wr recv;
352 struct t3_local_inv_wr local_inv;
353 struct t3_bind_mw_wr bind;
354 struct t3_bypass_wr bypass;
355 struct t3_rdma_init_wr init;
356 struct t3_modify_qp_wr qp_mod;
357 struct t3_genbit genbit;
/* Flit (64-bit word) offsets of the completion entry and the wrid cookie
 * inside a SQ respectively RQ work-queue entry.  Note the SQ and RQ place
 * them in opposite order. */
361 #define T3_SQ_CQE_FLIT 13
362 #define T3_SQ_COOKIE_FLIT 14
364 #define T3_RQ_COOKIE_FLIT 13
365 #define T3_RQ_CQE_FLIT 14
367 static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
369 return G_FW_RIWR_OP(be32toh(wqe->op_seop_flags));
372 static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
373 enum t3_wr_flags flags, u8 genbit, u32 tid,
376 wqe->op_seop_flags = htobe32(V_FW_RIWR_OP(op) |
377 V_FW_RIWR_SOPEOP(M_FW_RIWR_SOPEOP) |
378 V_FW_RIWR_FLAGS(flags));
380 wqe->gen_tid_len = htobe32(V_FW_RIWR_GEN(genbit) |
384 ((union t3_wr *)wqe)->genbit.genbit = htobe64(genbit);
388 * T3 ULP2_TX commands
395 /* T3 MC7 RDMA TPT entry format */
398 TPT_NON_SHARED_MR = 0x0,
401 TPT_MW_RELAXED_PROTECTION = 0x3
410 TPT_LOCAL_READ = 0x8,
411 TPT_LOCAL_WRITE = 0x4,
412 TPT_REMOTE_READ = 0x2,
413 TPT_REMOTE_WRITE = 0x1
417 __be32 valid_stag_pdid;
418 __be32 flags_pagesize_qpid;
420 __be32 rsvd_pbl_addr;
423 __be32 va_low_or_fbo;
425 __be32 rsvd_bind_cnt_or_pstag;
426 __be32 rsvd_pbl_size;
/*
 * Field accessors for the T3 MC7 TPT (translation/protection table) entry
 * words.  S_* = bit offset, M_* = mask, V_* = insert, G_* = extract,
 * F_* = flag value of a one-bit field.
 */
/* --- word: valid_stag_pdid --- */
429 #define S_TPT_VALID 31
430 #define V_TPT_VALID(x) ((x) << S_TPT_VALID)
431 #define F_TPT_VALID V_TPT_VALID(1U)
433 #define S_TPT_STAG_KEY 23
434 #define M_TPT_STAG_KEY 0xFF
435 #define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
436 #define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
438 #define S_TPT_STAG_STATE 22
439 #define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
440 #define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)
442 #define S_TPT_STAG_TYPE 20
443 #define M_TPT_STAG_TYPE 0x3
444 #define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
445 #define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
/* NOTE(review): S_TPT_PDID is defined outside this chunk. */
448 #define M_TPT_PDID 0xFFFFF
449 #define V_TPT_PDID(x) ((x) << S_TPT_PDID)
450 #define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)
/* --- word: flags_pagesize_qpid --- */
452 #define S_TPT_PERM 28
453 #define M_TPT_PERM 0xF
454 #define V_TPT_PERM(x) ((x) << S_TPT_PERM)
455 #define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)
457 #define S_TPT_REM_INV_DIS 27
458 #define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
459 #define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)
461 #define S_TPT_ADDR_TYPE 26
462 #define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
463 #define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)
465 #define S_TPT_MW_BIND_ENABLE 25
466 #define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
467 #define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)
469 #define S_TPT_PAGE_SIZE 20
470 #define M_TPT_PAGE_SIZE 0x1F
471 #define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
472 #define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
/* --- word: rsvd_pbl_addr --- */
474 #define S_TPT_PBL_ADDR 0
475 #define M_TPT_PBL_ADDR 0x1FFFFFFF
476 #define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
477 #define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
/* NOTE(review): S_TPT_QPID is defined outside this chunk. */
480 #define M_TPT_QPID 0xFFFFF
481 #define V_TPT_QPID(x) ((x) << S_TPT_QPID)
482 #define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)
/* --- word: rsvd_bind_cnt_or_pstag --- */
484 #define S_TPT_PSTAG 0
485 #define M_TPT_PSTAG 0xFFFFFF
486 #define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
487 #define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
/* --- word: rsvd_pbl_size --- */
489 #define S_TPT_PBL_SIZE 0
490 #define M_TPT_PBL_SIZE 0xFFFFF
491 #define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
492 #define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
/*
 * CQE (completion-queue entry) header field accessors.
 * S_* = bit offset, M_* = mask, G_* = extract, V_* = insert.
 * NOTE(review): S_CQE_OOO is defined outside this chunk.
 */
513 #define M_CQE_OOO 0x1
514 #define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
/* NOTE(review): "CEQ" below looks like a typo for "CQE"; the misspelled name
 * is kept because callers elsewhere may already use it. */
515 #define V_CEQ_OOO(x) ((x)<<S_CQE_OOO)
517 #define S_CQE_QPID 12
518 #define M_CQE_QPID 0x7FFFF
519 #define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
520 #define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
522 #define S_CQE_SWCQE 11
523 #define M_CQE_SWCQE 0x1
524 #define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
525 #define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
527 #define S_CQE_GENBIT 10
528 #define M_CQE_GENBIT 0x1
529 #define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
530 #define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
532 #define S_CQE_STATUS 5
533 #define M_CQE_STATUS 0x1F
534 #define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
535 #define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
/* NOTE(review): S_CQE_TYPE is defined outside this chunk. */
538 #define M_CQE_TYPE 0x1
539 #define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
540 #define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
542 #define S_CQE_OPCODE 0
543 #define M_CQE_OPCODE 0xF
544 #define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
545 #define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
/*
 * Convenience accessors taking a struct t3_cqe by value: byte-swap the
 * big-endian header word, then extract the field.
 */
547 #define SW_CQE(x) (G_CQE_SWCQE(be32toh((x).header)))
548 #define CQE_OOO(x) (G_CQE_OOO(be32toh((x).header)))
549 #define CQE_QPID(x) (G_CQE_QPID(be32toh((x).header)))
550 #define CQE_GENBIT(x) (G_CQE_GENBIT(be32toh((x).header)))
551 #define CQE_TYPE(x) (G_CQE_TYPE(be32toh((x).header)))
/* TYPE bit: set for SQ completions, clear for RQ completions. */
552 #define SQ_TYPE(x) (CQE_TYPE((x)))
553 #define RQ_TYPE(x) (!CQE_TYPE((x)))
554 #define CQE_STATUS(x) (G_CQE_STATUS(be32toh((x).header)))
555 #define CQE_OPCODE(x) (G_CQE_OPCODE(be32toh((x).header)))
/*
 * True iff the CQE's opcode is any flavor of SEND (plain, with solicited
 * event, with invalidate, or both).  Uses be32toh() for the big-endian
 * header word, matching every other CQE_* accessor in this header — the
 * original used the Linux spelling be32_to_cpu(), which this FreeBSD-style
 * file does not use anywhere else.
 */
#define CQE_SEND_OPCODE(x)( \
	(G_CQE_OPCODE(be32toh((x).header)) == T3_SEND) || \
	(G_CQE_OPCODE(be32toh((x).header)) == T3_SEND_WITH_SE) || \
	(G_CQE_OPCODE(be32toh((x).header)) == T3_SEND_WITH_INV) || \
	(G_CQE_OPCODE(be32toh((x).header)) == T3_SEND_WITH_SE_INV))
/* Byte length reported in the CQE (header is big-endian on the wire). */
563 #define CQE_LEN(x) (be32toh((x).len))
565 /* used for RQ completion processing */
566 #define CQE_WRID_STAG(x) (be32toh((x).u.rcqe.stag))
567 #define CQE_WRID_MSN(x) (be32toh((x).u.rcqe.msn))
569 /* used for SQ completion processing */
/* NOTE(review): unlike the fields above, wrid_hi/wrid_low are read without a
 * byte swap — presumably stored host-endian by the driver; confirm. */
570 #define CQE_WRID_SQ_WPTR(x) ((x).u.scqe.wrid_hi)
571 #define CQE_WRID_WPTR(x) ((x).u.scqe.wrid_low)
573 /* generic accessor macros */
574 #define CQE_WRID_HI(x) ((x).u.scqe.wrid_hi)
575 #define CQE_WRID_LOW(x) ((x).u.scqe.wrid_low)
/*
 * Hardware completion / protection-error status codes reported in the CQE
 * status field (5 bits, see M_CQE_STATUS above).
 */
577 #define TPT_ERR_SUCCESS 0x0
578 #define TPT_ERR_STAG 0x1 /* STAG invalid: either the */
579 /* STAG is off-limit, being 0, */
580 /* or STAG_key mismatch */
581 #define TPT_ERR_PDID 0x2 /* PDID mismatch */
582 #define TPT_ERR_QPID 0x3 /* QPID mismatch */
583 #define TPT_ERR_ACCESS 0x4 /* Invalid access right */
584 #define TPT_ERR_WRAP 0x5 /* Wrap error */
585 #define TPT_ERR_BOUND 0x6 /* base and bounds violation */
586 #define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
587 /* shared memory region */
588 #define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
589 /* MR that has an MW bound to it */
590 #define TPT_ERR_ECC 0x9 /* ECC error detected */
591 #define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */
592 /* reading PSTAG for a MW */
594 #define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
596 #define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */
597 #define TPT_ERR_CRC 0x10 /* CRC error */
598 #define TPT_ERR_MARKER 0x11 /* Marker error */
599 #define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
600 #define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */
601 #define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */
602 #define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
603 #define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */
604 #define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
605 #define TPT_ERR_MSN 0x18 /* MSN error */
606 #define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */
607 #define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */
/* MPA/DDP sequencing errors */
609 #define TPT_ERR_MSN_GAP 0x1B
610 #define TPT_ERR_MSN_RANGE 0x1C
611 #define TPT_ERR_IRD_OVERFLOW 0x1D
612 #define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
614 #define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
633 * A T3 WQ implements both the SQ and RQ.
636 union t3_wr *queue; /* DMA accessable memory */
637 bus_addr_t dma_addr; /* DMA address for HW */
638 u32 error; /* 1 once we go to ERROR */
640 u32 wptr; /* idx to next available WR slot */
641 u32 size_log2; /* total wq size */
642 struct t3_swsq *sq; /* SW SQ */
643 struct t3_swsq *oldest_read; /* tracks oldest pending read */
644 u32 sq_wptr; /* sq_wptr - sq_rptr == count of */
645 u32 sq_rptr; /* pending wrs */
646 u32 sq_size_log2; /* sq size */
647 struct t3_swrq *rq; /* SW RQ (holds consumer wr_ids */
648 u32 rq_wptr; /* rq_wptr - rq_rptr == count of */
649 u32 rq_rptr; /* pending wrs */
650 struct t3_swrq *rq_oldest_wr; /* oldest wr on the SW RQ */
651 u32 rq_size_log2; /* rq size */
652 u32 rq_addr; /* rq adapter address */
653 void *doorbell; /* kernel db */
654 u64 udb; /* user db if any */
655 struct cxio_rdev *rdev;
664 struct t3_cqe *queue;
665 struct t3_cqe *sw_queue;
670 #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
673 struct t3_cq_status_page {
677 static inline int cxio_cq_in_error(struct t3_cq *cq)
679 return ((struct t3_cq_status_page *)
680 &cq->queue[1 << cq->size_log2])->cq_err;
683 static inline void cxio_set_cq_in_error(struct t3_cq *cq)
685 ((struct t3_cq_status_page *)
686 &cq->queue[1 << cq->size_log2])->cq_err = 1;
689 static inline void cxio_set_wq_in_error(struct t3_wq *wq)
691 wq->queue->flit[13] = 1;
694 static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
698 cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
699 if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
704 static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
708 if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
709 cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
715 static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
719 if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
720 cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
723 cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
724 if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))