1 /**************************************************************************
3 Copyright (c) 2007, 2008 Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
30 ***************************************************************************/
31 #ifndef __IWCH_PROVIDER_H__
32 #define __IWCH_PROVIDER_H__
34 #include <rdma/ib_verbs.h>
/*
 * container_of(): recover a pointer to the enclosing structure from a
 * pointer to one of its members.  Guarded so this header does not clash
 * with an identical definition supplied by another header.
 */
#ifndef container_of
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
#endif
45 static __inline struct iwch_pd *
46 to_iwch_pd(struct ib_pd *ibpd)
48 return container_of(ibpd, struct iwch_pd, ibpd);
/*
 * TPT (memory translation/protection table) attributes for a region.
 * NOTE(review): this declaration is truncated in this chunk -- further
 * members and the closing brace are not visible here.
 */
struct tpt_attributes {
	enum tpt_mem_perm perms;	/* access-permission bits for the region */
	/* NOTE(review): "invaliate" is a typo for "invalidate"; renaming
	 * would break users of this field, so it is only flagged here. */
	u32 remote_invaliate_disable:1;
75 struct tpt_attributes attr;
/* Opaque handle naming a memory window (struct iwch_mw is declared elsewhere). */
typedef struct iwch_mw iwch_mw_handle;
80 static __inline struct iwch_mr *
81 to_iwch_mr(struct ib_mr *ibmr)
83 return container_of(ibmr, struct iwch_mr, ibmr);
90 struct tpt_attributes attr;
93 static __inline struct iwch_mw *
94 to_iwch_mw(struct ib_mw *ibmw)
96 return container_of(ibmw, struct iwch_mw, ibmw);
101 struct iwch_dev *rhp;
105 u32 /* __user */ *user_rptr_addr;
108 static __inline struct iwch_cq *
109 to_iwch_cq(struct ib_cq *ibcq)
111 return container_of(ibcq, struct iwch_cq, ibcq);
/*
 * MPA (Marker PDU Aligned framing) negotiation attributes.
 * NOTE(review): this declaration is truncated in this chunk -- further
 * members and the closing brace are not visible here.
 */
struct iwch_mpa_attributes {
	u8 recv_marker_enabled;	/* receive-direction MPA markers enabled */
	/* NOTE(review): the original comment here read "iWARP: enable
	 * inbound Read Resp." which looks misplaced -- verify. */
	u8 xmit_marker_enabled;
	u8 version;	/* 0 or 1 */
/*
 * Modifiable/queryable attributes of a queue pair.
 * NOTE(review): this declaration is truncated in this chunk -- several
 * members and the closing brace are not visible here.
 */
struct iwch_qp_attributes {
	u32 sq_max_sges_rdma_write;	/* max SGEs per RDMA WRITE work request */
	u8 enable_rdma_write;	/* enable inbound Read Resp. */
	u8 enable_mmid0_fastreg;	/* Enable STAG0 + Fast-register */
	/*
	 * Next QP state.  If the current state is specified, only the
	 * QP attributes will be modified.
	 * NOTE(review): the member this comment documents is not visible
	 * in this chunk.
	 */
	char terminate_buffer[52];	/* payload of a TERMINATE message */
	u32 terminate_msg_len;		/* valid bytes in terminate_buffer */
	u8 is_terminate_local;		/* terminate originated locally */
	struct iwch_mpa_attributes mpa_attr;	/* IN-OUT */
	struct iwch_ep *llp_stream_handle;	/* LLP (TCP stream) endpoint */
	char *stream_msg_buf;	/* Last stream msg. before Idle -> RTS */
	u32 stream_msg_buf_len;	/* Only on Idle -> RTS */
/* NOTE(review): the members below belong to struct iwch_qp, whose opening
 * declaration is not visible in this chunk. */
	struct iwch_dev *rhp;		/* back-pointer to the owning device */
	struct iwch_qp_attributes attr;	/* current QP attributes */
	enum IWCH_QP_FLAGS flags;	/* e.g. QP_QUIESCED, tested by qp_quiesced() */
	struct callout timer;		/* FreeBSD callout used for QP timing */
169 qp_quiesced(struct iwch_qp *qhp)
171 return qhp->flags & QP_QUIESCED;
174 static __inline struct iwch_qp *
175 to_iwch_qp(struct ib_qp *ibqp)
177 return container_of(ibqp, struct iwch_qp, ibqp);
/* Reference counting for QPs (implemented elsewhere in the driver). */
void iwch_qp_add_ref(struct ib_qp *qp);
void iwch_qp_rem_ref(struct ib_qp *qp);
/*
 * Per-open user context.
 * NOTE(review): this declaration is truncated in this chunk -- at least
 * the closing brace (and possibly further members) are not visible here.
 */
struct iwch_ucontext {
	struct ib_ucontext ibucontext;	/* embedded verbs ucontext */
	struct cxio_ucontext uctx;	/* low-level cxio context */
	struct mtx mmap_lock;		/* protects the mmaps list below */
	TAILQ_HEAD( ,iwch_mm_entry) mmaps;	/* pending mmap key/len entries */
191 static __inline struct iwch_ucontext *
192 to_iwch_ucontext(struct ib_ucontext *c)
194 return container_of(c, struct iwch_ucontext, ibucontext);
/*
 * One pending mmap translation, queued on iwch_ucontext.mmaps and looked
 * up by remove_mmap()/insert_mmap().
 * NOTE(review): this declaration is truncated in this chunk -- the
 * key/addr/len members used by those helpers and the closing brace are
 * not visible here.
 */
struct iwch_mm_entry {
	TAILQ_ENTRY(iwch_mm_entry) entry;	/* linkage on ucontext->mmaps */
204 static __inline struct iwch_mm_entry *
205 remove_mmap(struct iwch_ucontext *ucontext,
206 u32 key, unsigned len)
208 struct iwch_mm_entry *tmp, *mm;
210 mtx_lock(&ucontext->mmap_lock);
211 TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
212 if (mm->key == key && mm->len == len) {
213 TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
214 mtx_unlock(&ucontext->mmap_lock);
215 CTR4(KTR_IW_CXGB, "%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
216 key, (unsigned long long) mm->addr, mm->len);
220 mtx_unlock(&ucontext->mmap_lock);
226 insert_mmap(struct iwch_ucontext *ucontext,
227 struct iwch_mm_entry *mm)
229 mtx_lock(&ucontext->mmap_lock);
230 CTR4(KTR_IW_CXGB, "%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
231 mm->key, (unsigned long long) mm->addr, mm->len);
232 TAILQ_INSERT_TAIL(&ucontext->mmaps, mm, entry);
233 mtx_unlock(&ucontext->mmap_lock);
/*
 * Bit mask selecting which members of struct iwch_qp_attributes a QP
 * modify operation should apply.
 */
enum iwch_qp_attr_mask {
	IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
	IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	IWCH_QP_ATTR_MAX_ORD = 1 << 11,
	IWCH_QP_ATTR_MAX_IRD = 1 << 12,
	IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
	IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	/*
	 * Union of the modifiable attributes above; note NEXT_STATE and
	 * ENABLE_RDMA_BIND are deliberately excluded.
	 */
	IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
				     IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
				     IWCH_QP_ATTR_MAX_ORD |
				     IWCH_QP_ATTR_MAX_IRD |
				     IWCH_QP_ATTR_LLP_STREAM_HANDLE |
				     IWCH_QP_ATTR_STREAM_MSG_BUFFER |
				     IWCH_QP_ATTR_MPA_ATTR |
				     IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
};
/*
 * Apply the attributes selected by `mask' from `attrs' to a QP.
 * NOTE(review): the parameter list was truncated in this chunk; the QP
 * pointer and trailing `internal' flag have been reconstructed -- verify
 * against the implementation before relying on this prototype.
 */
int iwch_modify_qp(struct iwch_dev *rhp,
				struct iwch_qp *qhp,
				enum iwch_qp_attr_mask mask,
				struct iwch_qp_attributes *attrs,
				int internal);
/* NOTE(review): these enumerators belong to enum iwch_qp_state, whose
 * opening declaration is not visible in this chunk (iwch_convert_state()
 * suggests the full value set: IDLE, RTS, CLOSING, TERMINATE, ERROR). */
	IWCH_QP_STATE_TERMINATE,
	IWCH_QP_STATE_CLOSING,
273 iwch_convert_state(enum ib_qp_state ib_state)
278 return IWCH_QP_STATE_IDLE;
280 return IWCH_QP_STATE_RTS;
282 return IWCH_QP_STATE_CLOSING;
284 return IWCH_QP_STATE_TERMINATE;
286 return IWCH_QP_STATE_ERROR;
293 iwch_ib_to_tpt_access(int acc)
295 return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
296 (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
297 (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
302 iwch_ib_to_mwbind_access(int acc)
304 return (acc & IB_ACCESS_REMOTE_WRITE ? T3_MEM_ACCESS_REM_WRITE : 0) |
305 (acc & IB_ACCESS_REMOTE_READ ? T3_MEM_ACCESS_REM_READ : 0) |
306 (acc & IB_ACCESS_LOCAL_WRITE ? T3_MEM_ACCESS_LOCAL_WRITE : 0) |
307 T3_MEM_ACCESS_LOCAL_READ;
/* Validity state of a memory-region STag/MMID. */
enum iwch_mmid_state {
	IWCH_STAG_STATE_VALID,
	IWCH_STAG_STATE_INVALID
};
/* Flags selecting how much QP context a query operation touches. */
enum iwch_qp_query_flags {
	IWCH_QP_QUERY_CONTEXT_NONE = 0x0,	/* No ctx; Only attrs */
	IWCH_QP_QUERY_CONTEXT_GET = 0x1,	/* Get ctx + attrs */
	IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2,	/* Not Supported */
	/*
	 * Quiesce QP context; Consumer
	 * will NOT replay outstanding WR
	 */
	IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
	IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
	/* NOTE(review): 0x32 (not the bit value 0x20) matches the source. */
	IWCH_QP_QUERY_TEST_USERWRITE = 0x32	/* Test special */
};
/* Work-request posting entry points (verbs ops). */
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr);
/* NOTE(review): this parameter list appears truncated in this chunk --
 * an ib_mw argument is expected between qp and mw_bind; verify. */
int iwch_bind_mw(struct ib_qp *qp,
			     struct ib_mw_bind *mw_bind);
/* Poll up to num_entries completions from a CQ into wc. */
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
/* Post a TERMINATE for qhp (triggered by the response-queue message). */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
/* Register/unregister the device with the ib_verbs core. */
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
void stop_read_rep_timer(struct iwch_qp *qhp);
/* NOTE(review): the next two prototypes are truncated in this chunk --
 * their remaining parameter lines are not visible here. */
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
/* PBL (page buffer list) management for memory regions. */
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
void iwch_free_pbl(struct iwch_mr *mhp);
int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
/* NOTE(review): prototype truncated in this chunk. */
int build_phys_page_list(struct ib_phys_buf *buffer_list,
/* Node description string reported through the ib_device. */
#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"