/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __T4_H__
#define __T4_H__

#include "common/t4_regs_values.h"
#include "common/t4_regs.h"
/*
 * Fixme: Adding missing defines
 */
#define SGE_PF_KDOORBELL 0x0
#define QID_MASK 0xffff8000U
#define QID_SHIFT 15
#define QID(x) ((x) << QID_SHIFT)
#define DBPRIO 0x00004000U
#define PIDX_MASK 0x00003fffU
#define PIDX_SHIFT 0
#define PIDX(x) ((x) << PIDX_SHIFT)

#define SGE_PF_GTS 0x4
#define INGRESSQID_MASK 0xffff0000U
#define INGRESSQID_SHIFT 16
#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT)
#define TIMERREG_MASK 0x0000e000U
#define TIMERREG_SHIFT 13
#define TIMERREG(x) ((x) << TIMERREG_SHIFT)
#define SEINTARM_MASK 0x00001000U
#define SEINTARM_SHIFT 12
#define SEINTARM(x) ((x) << SEINTARM_SHIFT)
#define CIDXINC_MASK 0x00000fffU
#define CIDXINC_SHIFT 0
#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
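/*
 * Illustrative sketch (not part of the original header): how the GTS
 * fields above combine into a single doorbell word. t4_arm_cq() below
 * does the same thing against the user-doorbell area; "regs" here
 * stands for a hypothetical mapped PF register window, used only for
 * this example.
 */
static inline void t4_example_gts_write(void __iomem *regs, u32 iqid,
    u16 cidx_inc, int se)
{
	/* Return cidx_inc credits, optionally arm, select timer 6. */
	u32 v = SEINTARM(se) | CIDXINC(cidx_inc) | TIMERREG(6);

	writel(v | INGRESSQID(iqid), (u8 __iomem *)regs + SGE_PF_GTS);
}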
#define T4_MAX_NUM_PD 65536
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffffffff000 /* 4KB-8TB */
#define T4_STAG_UNSET 0xffffffff

#define A_PCIE_MA_SYNC 0x30b4
struct t4_status_page {
	__be32 rsvd1;	/* flit 0 - hw owns */
	__be16 rsvd2;
	__be16 qid;
	__be16 cidx;
	__be16 pidx;
	u8 qp_err;	/* flit 1 - sw owns */
	u8 db_off;
	u8 pad;
	u16 host_wq_pidx;
	u16 host_cidx;
	u16 host_pidx;
};
#define T4_EQ_ENTRY_SIZE 64

#define T4_SQ_NUM_SLOTS 5
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
			sizeof(struct fw_ri_rdma_write_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
			sizeof(struct fw_ri_rdma_write_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
			sizeof(struct fw_ri_immd)) & ~31UL)
#define T4_MAX_FR_IMMD_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
#define T4_MAX_FR_DSGL 1024
#define T4_MAX_FR_DSGL_DEPTH (T4_MAX_FR_DSGL / sizeof(u64))
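/*
 * Worked example of the arithmetic above (the structure sizes here are
 * illustrative assumptions, not the real fw_ri_* sizes): one SQ slot
 * set is T4_SQ_NUM_BYTES = 64 * 5 = 320 bytes. If a send WR header
 * took 32 bytes and an ISGL header 8 bytes, 280 bytes would remain,
 * i.e. 280 / 16 = 17 SGEs at 16 bytes each. The macros perform exactly
 * this computation with the true sizes at compile time.
 */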
static inline int t4_max_fr_depth(int use_dsgl)
{
	return use_dsgl ? T4_MAX_FR_DSGL_DEPTH : T4_MAX_FR_IMMD_DEPTH;
}
#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4
union t4_wr {
	struct fw_ri_res_wr res;
	struct fw_ri_wr ri;
	struct fw_ri_rdma_write_wr write;
	struct fw_ri_send_wr send;
	struct fw_ri_rdma_read_wr read;
	struct fw_ri_bind_mw_wr bind;
	struct fw_ri_fr_nsmr_wr fr;
	struct fw_ri_fr_nsmr_tpte_wr fr_tpte;
	struct fw_ri_inv_lstag_wr inv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};
union t4_recv_wr {
	struct fw_ri_recv_wr recv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};
static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
    enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
	wqe->send.opcode = (u8)opcode;
	wqe->send.flags = flags;
	wqe->send.wrid = wrid;
	wqe->send.r1[0] = 0;
	wqe->send.r1[1] = 0;
	wqe->send.r1[2] = 0;
	wqe->send.len16 = len16;
}
/* CQE/AE status codes */
#define T4_ERR_SUCCESS 0x0
#define T4_ERR_STAG 0x1				/* STAG invalid: either the */
						/* STAG is off limit, being 0, */
						/* or STAG_key mismatch */
#define T4_ERR_PDID 0x2				/* PDID mismatch */
#define T4_ERR_QPID 0x3				/* QPID mismatch */
#define T4_ERR_ACCESS 0x4			/* Invalid access right */
#define T4_ERR_WRAP 0x5				/* Wrap error */
#define T4_ERR_BOUND 0x6			/* base and bounds violation */
#define T4_ERR_INVALIDATE_SHARED_MR 0x7		/* attempt to invalidate a */
						/* shared memory region */
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8	/* attempt to invalidate a */
						/* MR with a MW bound to it */
#define T4_ERR_ECC 0x9				/* ECC error detected */
#define T4_ERR_ECC_PSTAG 0xA			/* ECC error detected when */
						/* reading PSTAG for a MW */
#define T4_ERR_PBL_ADDR_BOUND 0xB		/* pbl addr out of bounds: */
						/* software error */
#define T4_ERR_SWFLUSH 0xC			/* SW FLUSHED */
#define T4_ERR_CRC 0x10				/* CRC error */
#define T4_ERR_MARKER 0x11			/* Marker error */
#define T4_ERR_PDU_LEN_ERR 0x12			/* invalid PDU length */
#define T4_ERR_OUT_OF_RQE 0x13			/* out of RQE */
#define T4_ERR_DDP_VERSION 0x14			/* wrong DDP version */
#define T4_ERR_RDMA_VERSION 0x15		/* wrong RDMA version */
#define T4_ERR_OPCODE 0x16			/* invalid rdma opcode */
#define T4_ERR_DDP_QUEUE_NUM 0x17		/* invalid ddp queue number */
#define T4_ERR_MSN 0x18				/* MSN error */
#define T4_ERR_TBIT 0x19			/* tag bit not set correctly */
#define T4_ERR_MO 0x1A				/* MO not 0 for TERMINATE */
						/* or READ_REQ */
#define T4_ERR_MSN_GAP 0x1B
#define T4_ERR_MSN_RANGE 0x1C
#define T4_ERR_IRD_OVERFLOW 0x1D
#define T4_ERR_RQE_ADDR_BOUND 0x1E		/* RQE addr out of bounds: */
						/* software error */
#define T4_ERR_INTERNAL_ERR 0x1F		/* internal error (opcode */
						/* mismatch) */

struct t4_cqe {
	__be32 header;
	__be32 len;
	union {
		struct {
			__be32 stag;
			__be32 msn;
		} rcqe;
		struct {
			__be32 stag;
			u16 nada2;
			u16 cidx;
		} scqe;
		struct {
			__be32 wrid_hi;
			__be32 wrid_low;
		} gen;
		u64 drain_cookie;
	} u;
	__be64 reserved;
	__be64 bits_type_ts;
};

/* macros for flit 0 of the cqe */
#define S_CQE_QPID 12
#define M_CQE_QPID 0xFFFFF
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x) ((x) << S_CQE_QPID)

#define S_CQE_SWCQE 11
#define M_CQE_SWCQE 0x1
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x) ((x) << S_CQE_SWCQE)

#define S_CQE_STATUS 5
#define M_CQE_STATUS 0x1F
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x) << S_CQE_STATUS)

#define S_CQE_TYPE 4
#define M_CQE_TYPE 0x1
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x) ((x) << S_CQE_TYPE)

#define S_CQE_OPCODE 0
#define M_CQE_OPCODE 0xF
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x) << S_CQE_OPCODE)
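/*
 * Example of how the S_/M_/G_/V_ macro quartet round-trips a field
 * (values taken from the definitions above): V_CQE_STATUS(T4_ERR_ACCESS)
 * places 0x4 at bits 5..9 of flit 0, and G_CQE_STATUS() of that word
 * recovers 0x4 again. The V_* forms build a header word, the G_* forms
 * parse one.
 */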
#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))

#define CQE_SEND_OPCODE(x) ( \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
#define CQE_LEN(x) (be32_to_cpu((x)->len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
#define CQE_WRID_FR_STAG(x) (be32_to_cpu((x)->u.scqe.stag))

/* generic accessor macros */
#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie)
/* macros for flit 3 of the cqe */
#define S_CQE_GENBIT 63
#define M_CQE_GENBIT 0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x) << S_CQE_GENBIT)

#define S_CQE_OVFBIT 62
#define M_CQE_OVFBIT 0x1
#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)

#define S_CQE_IQTYPE 60
#define M_CQE_IQTYPE 0x3
#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)

#define M_CQE_TS 0x0fffffffffffffffULL
#define G_CQE_TS(x) ((x) & M_CQE_TS)

#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
struct t4_swsqe {
	u64 wr_id;
	struct t4_cqe cqe;
	int read_len;
	int opcode;
	int complete;
	int signaled;
	u16 idx;
	int flushed;
	struct timespec host_ts;
	u64 sge_ts;
};
static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
	return pgprot_writecombine(prot);
#else
	return pgprot_noncached(prot);
#endif
}
enum {
	T4_SQ_ONCHIP = (1<<0),
};
struct t4_sq {
	union t4_wr *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	unsigned long phys_addr;
	struct t4_swsqe *sw_sq;
	struct t4_swsqe *oldest_read;
	void __iomem *bar2_va;
	u64 bar2_pa;
	size_t memsize;
	u32 bar2_qid;
	u32 qid;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
	u16 flags;
	short flush_cidx;
};
struct t4_swrqe {
	u64 wr_id;
};

struct t4_rq {
	union t4_recv_wr *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	unsigned long phys_addr;
	struct t4_swrqe *sw_rq;
	void __iomem *bar2_va;
	u64 bar2_pa;
	size_t memsize;
	u32 bar2_qid;
	u32 qid;
	u32 msn;
	u32 rqt_hwaddr;
	u16 rqt_size;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
};
struct t4_wq {
	struct t4_sq sq;
	struct t4_rq rq;
	struct c4iw_rdev *rdev;
	int flushed;
};
static inline int t4_rqes_posted(struct t4_wq *wq)
{
	return wq->rq.in_use;
}

static inline int t4_rq_empty(struct t4_wq *wq)
{
	return wq->rq.in_use == 0;
}

static inline int t4_rq_full(struct t4_wq *wq)
{
	return wq->rq.in_use == (wq->rq.size - 1);
}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{
	return wq->rq.size - 1 - wq->rq.in_use;
}
static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
	wq->rq.in_use++;
	if (++wq->rq.pidx == wq->rq.size)
		wq->rq.pidx = 0;
	wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
}
static inline void t4_rq_consume(struct t4_wq *wq)
{
	wq->rq.in_use--;
	wq->rq.msn++;
	if (++wq->rq.cidx == wq->rq.size)
		wq->rq.cidx = 0;
}
static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
{
	return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
}

static inline u16 t4_rq_wq_size(struct t4_wq *wq)
{
	return wq->rq.size * T4_RQ_NUM_SLOTS;
}
static inline int t4_sq_onchip(struct t4_sq *sq)
{
	return sq->flags & T4_SQ_ONCHIP;
}

static inline int t4_sq_empty(struct t4_wq *wq)
{
	return wq->sq.in_use == 0;
}

static inline int t4_sq_full(struct t4_wq *wq)
{
	return wq->sq.in_use == (wq->sq.size - 1);
}

static inline u32 t4_sq_avail(struct t4_wq *wq)
{
	return wq->sq.size - 1 - wq->sq.in_use;
}
static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
	wq->sq.in_use++;
	if (++wq->sq.pidx == wq->sq.size)
		wq->sq.pidx = 0;
	wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
		wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
}
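/*
 * Worked example for the producer arithmetic above: a WR with
 * len16 = 8 occupies 8 * 16 = 128 bytes, so wq_pidx (which counts
 * 64-byte hardware slots) advances by DIV_ROUND_UP(128, 64) = 2,
 * while pidx (which counts whole WRs) advances by 1.
 */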
static inline void t4_sq_consume(struct t4_wq *wq)
{
	BUG_ON(wq->sq.in_use < 1);
	if (wq->sq.cidx == wq->sq.flush_cidx)
		wq->sq.flush_cidx = -1;
	wq->sq.in_use--;
	if (++wq->sq.cidx == wq->sq.size)
		wq->sq.cidx = 0;
}
static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
{
	return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
}

static inline u16 t4_sq_wq_size(struct t4_wq *wq)
{
	return wq->sq.size * T4_SQ_NUM_SLOTS;
}
/* This function copies a 64 byte coalesced work request to memory
 * mapped BAR2 space. For coalesced WRs, the SGE fetches data
 * from the FIFO instead of from the host.
 */
static inline void pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}
static inline void
t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe, u8 wc)
{
	/* Flush host queue memory writes. */
	wmb();
	if (wc && inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
		CTR2(KTR_IW_CXGBE, "%s: WC wq->sq.pidx = %d",
		    __func__, wq->sq.pidx);
		pio_copy((u64 __iomem *)
		    ((u64)wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
		    (u64 *)wqe);
	} else {
		CTR2(KTR_IW_CXGBE, "%s: DB wq->sq.pidx = %d",
		    __func__, wq->sq.pidx);
		writel(V_PIDX_T5(inc) | V_QID(wq->sq.bar2_qid),
		    (void __iomem *)((u64)wq->sq.bar2_va +
		    SGE_UDB_KDOORBELL));
	}

	/* Flush user doorbell area writes. */
	wmb();
}
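/*
 * Illustrative sketch (an assumption, not code from the original file):
 * how init_wr_hdr(), t4_sq_produce() and t4_ring_sq_db() combine when
 * posting a bare send WR. Locking, SGL construction and error handling
 * are omitted, and the helper name is hypothetical.
 */
static inline void t4_example_post_send(struct t4_wq *wq, u16 wrid)
{
	/* wq_pidx counts 64-byte slots, so index the queue in bytes. */
	union t4_wr *wqe = (union t4_wr *)((u8 *)wq->sq.queue +
	    wq->sq.wq_pidx * T4_EQ_ENTRY_SIZE);
	u8 len16 = DIV_ROUND_UP(sizeof(struct fw_ri_send_wr), 16);
	u16 idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);

	init_wr_hdr(wqe, wrid, FW_RI_SEND_WR, 0, len16); /* fill flit 0 */
	t4_sq_produce(wq, len16);	/* advance pidx/wq_pidx/in_use */
	t4_ring_sq_db(wq, idx, wqe, 0);	/* tell the SGE to fetch it */
}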
static inline void
t4_ring_rq_db(struct t4_wq *wq, u16 inc, union t4_recv_wr *wqe, u8 wc)
{
	/* Flush host queue memory writes. */
	wmb();
	if (wc && inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
		CTR2(KTR_IW_CXGBE, "%s: WC wq->rq.pidx = %d",
		    __func__, wq->rq.pidx);
		pio_copy((u64 __iomem *)((u64)wq->rq.bar2_va +
		    SGE_UDB_WCDOORBELL), (u64 *)wqe);
	} else {
		CTR2(KTR_IW_CXGBE, "%s: DB wq->rq.pidx = %d",
		    __func__, wq->rq.pidx);
		writel(V_PIDX_T5(inc) | V_QID(wq->rq.bar2_qid),
		    (void __iomem *)((u64)wq->rq.bar2_va +
		    SGE_UDB_KDOORBELL));
	}

	/* Flush user doorbell area writes. */
	wmb();
}
static inline int t4_wq_in_error(struct t4_wq *wq)
{
	return wq->rq.queue[wq->rq.size].status.qp_err;
}

static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.qp_err = 1;
}
enum t4_cq_flags {
	CQ_ARMED = 1,
};

struct t4_cq {
	struct t4_cqe *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	struct t4_cqe *sw_queue;
	void __iomem *bar2_va;
	u64 bar2_pa;
	u32 bar2_qid;
	struct c4iw_rdev *rdev;
	size_t memsize;
	__be64 bits_type_ts;
	u32 cqid;
	u16 size; /* including status page */
	u16 cidx;
	u16 sw_pidx;
	u16 sw_cidx;
	u16 sw_in_use;
	u16 cidx_inc;
	u8 gen;
	u8 error;
	unsigned long flags;
};
static inline void write_gts(struct t4_cq *cq, u32 val)
{
	writel(val | V_INGRESSQID(cq->bar2_qid),
	    (void __iomem *)((u64)cq->bar2_va + SGE_UDB_GTS));
}
static inline int t4_clear_cq_armed(struct t4_cq *cq)
{
	return test_and_clear_bit(CQ_ARMED, &cq->flags);
}
static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
	u32 val;

	set_bit(CQ_ARMED, &cq->flags);
	while (cq->cidx_inc > CIDXINC_MASK) {
		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7);
		writel(val | V_INGRESSQID(cq->bar2_qid),
		    (void __iomem *)((u64)cq->bar2_va + SGE_UDB_GTS));
		cq->cidx_inc -= CIDXINC_MASK;
	}
	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6);
	writel(val | V_INGRESSQID(cq->bar2_qid),
	    (void __iomem *)((u64)cq->bar2_va + SGE_UDB_GTS));
	cq->cidx_inc = 0;
	return 0;
}
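/*
 * Worked example for the loop above: CIDXINC is a 12-bit field, so at
 * most CIDXINC_MASK (4095) credits can be returned per GTS write. With
 * 5000 accumulated credits, the loop first returns 4095 without arming
 * (timer 7), then the final write returns the remaining 905 and arms
 * the CQ (timer 6, SEINTARM(se)).
 */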
static inline void t4_swcq_produce(struct t4_cq *cq)
{
	cq->sw_in_use++;
	if (cq->sw_in_use == cq->size) {
		CTR2(KTR_IW_CXGBE, "%s cxgb4 sw cq overflow cqid %u",
		    __func__, cq->cqid);
		cq->error = 1;
		BUG_ON(1);
	}
	if (++cq->sw_pidx == cq->size)
		cq->sw_pidx = 0;
}
static inline void t4_swcq_consume(struct t4_cq *cq)
{
	BUG_ON(cq->sw_in_use < 1);
	cq->sw_in_use--;
	if (++cq->sw_cidx == cq->size)
		cq->sw_cidx = 0;
}
static inline void t4_hwcq_consume(struct t4_cq *cq)
{
	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == M_CIDXINC) {
		u32 val;

		val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7);
		write_gts(cq, val);
		cq->cidx_inc = 0;
	}
	if (++cq->cidx == cq->size) {
		cq->cidx = 0;
		cq->gen ^= 1;
	}
}
static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
	return (CQE_GENBIT(cqe) == cq->gen);
}

static inline int t4_cq_notempty(struct t4_cq *cq)
{
	return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
}
static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret;
	u16 prev_cidx;

	if (cq->cidx == 0)
		prev_cidx = cq->size - 1;
	else
		prev_cidx = cq->cidx - 1;

	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
		ret = -EOVERFLOW;
		cq->error = 1;
		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
		BUG_ON(1);
	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {

		/* Ensure CQE is flushed to memory */
		rmb();
		*cqe = &cq->queue[cq->cidx];
		ret = 0;
	} else
		ret = -ENODATA;
	return ret;
}
static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
	if (cq->sw_in_use == cq->size) {
		CTR2(KTR_IW_CXGBE, "%s cxgb4 sw cq overflow cqid %u",
		    __func__, cq->cqid);
		cq->error = 1;
		BUG_ON(1);
		return NULL;
	}
	if (cq->sw_in_use)
		return &cq->sw_queue[cq->sw_cidx];
	return NULL;
}
static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret = 0;

	if (cq->error)
		ret = -ENODATA;
	else if (cq->sw_in_use)
		*cqe = &cq->sw_queue[cq->sw_cidx];
	else
		ret = t4_next_hw_cqe(cq, cqe);
	return ret;
}
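/*
 * Illustrative poll sketch (an assumption, not code from the original
 * file): reaping one completion with the helpers above. A real consumer
 * also translates the CQE into an ib_wc; that part is omitted here and
 * the helper name is hypothetical.
 */
static inline int t4_example_poll_one(struct t4_cq *cq)
{
	struct t4_cqe *cqe;
	int ret;

	ret = t4_next_cqe(cq, &cqe);
	if (ret)
		return ret;		/* -ENODATA: nothing to reap */
	if (SW_CQE(cqe))
		t4_swcq_consume(cq);	/* software-generated entry */
	else
		t4_hwcq_consume(cq);	/* hardware entry, credits GTS */
	return 0;
}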
static inline int t4_cq_in_error(struct t4_cq *cq)
{
	return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
}

static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
	((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}
struct t4_dev_status_page {
	u8 db_off;
	u8 wc_supported;
	u16 pad2;
	u32 pad3;
	u64 qp_start;
	u64 qp_size;
	u64 cq_start;
	u64 cq_size;
};

#endif /* __T4_H__ */