]> CyberLeo.Net >> Repos - FreeBSD/releng/8.1.git/blob - sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.h
Copy stable/8 to releng/8.1 in preparation for 8.1-RC1.
[FreeBSD/releng/8.1.git] / sys / dev / cxgb / ulp / iw_cxgb / iw_cxgb_provider.h
1 /**************************************************************************
2
3 Copyright (c) 2007, 2008 Chelsio Inc.
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Neither the name of the Chelsio Corporation nor the names of its
13     contributors may be used to endorse or promote products derived from
14     this software without specific prior written permission.
15
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27
28 $FreeBSD$
29
30 ***************************************************************************/
31 #ifndef __IWCH_PROVIDER_H__
32 #define __IWCH_PROVIDER_H__
33
34 #include <contrib/rdma/ib_verbs.h>
35
/*
 * Protection domain: wraps the generic ib_pd with the chip's PD id and a
 * back-pointer to the owning iwch_dev.
 */
struct iwch_pd {
	struct ib_pd ibpd;	/* embedded verbs PD; see to_iwch_pd() */
	u32 pdid;		/* hardware protection-domain id */
	struct iwch_dev *rhp;	/* owning device */
};
41
/*
 * Fallback container_of(): recover a pointer to the enclosing structure
 * (stype) from a pointer to one of its members (field).  Only defined here
 * when no system header has already provided it.
 */
#ifndef container_of
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
#endif
45 static __inline struct iwch_pd *
46 to_iwch_pd(struct ib_pd *ibpd)
47 {
48         return container_of(ibpd, struct iwch_pd, ibpd);
49 }
50
/*
 * Software copy of a TPT (memory translation/protection table) entry's
 * attributes for a memory region or window.
 */
struct tpt_attributes {
	u32 stag;			/* steering tag identifying the entry */
	u32 state:1;			/* valid/invalid; see iwch_mmid_state */
	u32 type:2;
	u32 rsvd:1;
	enum tpt_mem_perm perms;	/* access permissions */
	/* NOTE(review): "invaliate" is a long-standing upstream typo; renaming
	 * the field would break every user of this header. */
	u32 remote_invaliate_disable:1;
	u32 zbva:1;			/* zero-based virtual address */
	u32 mw_bind_enable:1;		/* allow memory-window binds */
	u32 page_size:5;

	u32 pdid;			/* owning protection-domain id */
	u32 qpid;			/* associated QP id, where applicable */
	u32 pbl_addr;			/* page-buffer list address */
	u32 len;			/* region length */
	u64 va_fbo;			/* virtual address or first-byte offset */
	u32 pbl_size;			/* page-buffer list size */
};
69
/* Memory region: verbs MR plus the backing umem and TPT attributes. */
struct iwch_mr {
	struct ib_mr ibmr;		/* embedded verbs MR; see to_iwch_mr() */
	struct ib_umem *umem;		/* user memory backing, NULL for kernel MRs */
	struct iwch_dev *rhp;		/* owning device */
	u64 kva;			/* kernel virtual address of the region */
	struct tpt_attributes attr;	/* software copy of the TPT entry */
};
77
/* Opaque handle alias for a memory window. */
typedef struct iwch_mw iwch_mw_handle;
79
80 static __inline struct iwch_mr *
81 to_iwch_mr(struct ib_mr *ibmr)
82 {
83         return container_of(ibmr, struct iwch_mr, ibmr);
84 }
85
/* Memory window: verbs MW plus its TPT attributes. */
struct iwch_mw {
	struct ib_mw ibmw;		/* embedded verbs MW; see to_iwch_mw() */
	struct iwch_dev *rhp;		/* owning device */
	u64 kva;
	struct tpt_attributes attr;	/* software copy of the TPT entry */
};
92
93 static __inline struct iwch_mw *
94 to_iwch_mw(struct ib_mw *ibmw)
95 {
96         return container_of(ibmw, struct iwch_mw, ibmw);
97 }
98
/* Completion queue: verbs CQ plus the T3 hardware CQ state. */
struct iwch_cq {
	struct ib_cq ibcq;		/* embedded verbs CQ; see to_iwch_cq() */
	struct iwch_dev *rhp;		/* owning device */
	struct t3_cq cq;		/* hardware CQ state */
	struct mtx lock;		/* protects the CQ */
	int refcnt;			/* reference count */
	u32 /* __user */ *user_rptr_addr; /* userspace read-pointer, if user CQ */
};
107
108 static __inline struct iwch_cq *
109 to_iwch_cq(struct ib_cq *ibcq)
110 {
111         return container_of(ibcq, struct iwch_cq, ibcq);
112 }
113
/* Per-QP flag bits kept in iwch_qp.flags. */
enum IWCH_QP_FLAGS {
	QP_QUIESCED = 0x01	/* QP has been quiesced; see qp_quiesced() */
};
117
/* Negotiated MPA (Marker PDU Aligned framing) connection attributes. */
struct iwch_mpa_attributes {
	u8 recv_marker_enabled;	/* MPA markers expected on receive */
	u8 xmit_marker_enabled;	/* MPA markers inserted on transmit
				 * (NOTE(review): original comment here read
				 * "enable inbound Read Resp.", apparently
				 * copy-pasted from enable_rdma_write) */
	u8 crc_enabled;		/* MPA CRC in use */
	u8 version;	/* 0 or 1 */
};
124
/*
 * QP attributes manipulated by iwch_modify_qp().  Which fields are read is
 * selected by the iwch_qp_attr_mask bits passed alongside.
 */
struct iwch_qp_attributes {
	u32 scq;			/* send CQ id */
	u32 rcq;			/* receive CQ id */
	u32 sq_num_entries;		/* send queue depth */
	u32 rq_num_entries;		/* receive queue depth */
	u32 sq_max_sges;		/* max SGEs per send WR */
	u32 sq_max_sges_rdma_write;	/* max SGEs per RDMA write WR */
	u32 rq_max_sges;		/* max SGEs per receive WR */
	u32 state;			/* current state; see iwch_qp_state */
	u8 enable_rdma_read;
	u8 enable_rdma_write;	/* enable inbound Read Resp. */
	u8 enable_bind;		/* allow memory-window binds */
	u8 enable_mmid0_fastreg;	/* Enable STAG0 + Fast-register */
	u32 max_ord;			/* max outbound RDMA read depth */
	u32 max_ird;			/* max inbound RDMA read depth */
	u32 pd;	/* IN */
	/*
	 * Next QP state. If specify the current state, only the
	 * QP attributes will be modified.
	 */
	u32 next_state;
	char terminate_buffer[52];	/* TERMINATE message payload */
	u32 terminate_msg_len;
	u8 is_terminate_local;		/* terminate originated locally */
	struct iwch_mpa_attributes mpa_attr;	/* IN-OUT */
	struct iwch_ep *llp_stream_handle;	/* LLP (TCP) endpoint */
	char *stream_msg_buf;	/* Last stream msg. before Idle -> RTS */
	u32 stream_msg_buf_len;	/* Only on Idle -> RTS */
};
154
/* Queue pair: verbs QP plus T3 work-queue state and connection endpoint. */
struct iwch_qp {
	struct ib_qp ibqp;		/* embedded verbs QP; see to_iwch_qp() */
	struct iwch_dev *rhp;		/* owning device */
	struct iwch_ep *ep;		/* connection endpoint */
	struct iwch_qp_attributes attr;	/* current attributes */
	struct t3_wq wq;		/* hardware work queue */
	struct mtx lock;		/* protects the QP */
	int refcnt;			/* see iwch_qp_add_ref/iwch_qp_rem_ref */
	enum IWCH_QP_FLAGS flags;	/* e.g. QP_QUIESCED */
	struct callout timer;
};
166
167 static __inline int
168 qp_quiesced(struct iwch_qp *qhp)
169 {
170         return qhp->flags & QP_QUIESCED;
171 }
172
173 static __inline struct iwch_qp *
174 to_iwch_qp(struct ib_qp *ibqp)
175 {
176         return container_of(ibqp, struct iwch_qp, ibqp);
177 }
178
/* QP reference counting helpers (take / drop a reference on the QP). */
void iwch_qp_add_ref(struct ib_qp *qp);
void iwch_qp_rem_ref(struct ib_qp *qp);
181
/* Per-process user context: cxio context plus the list of mmap cookies. */
struct iwch_ucontext {
	struct ib_ucontext ibucontext;	/* embedded verbs context */
	struct cxio_ucontext uctx;	/* low-level cxio context */
	u32 key;			/* next mmap key to hand out */
	struct mtx mmap_lock;		/* protects mmaps list */
	TAILQ_HEAD( ,iwch_mm_entry) mmaps; /* outstanding mmap entries */
};
189
190 static __inline struct iwch_ucontext *
191 to_iwch_ucontext(struct ib_ucontext *c)
192 {
193         return container_of(c, struct iwch_ucontext, ibucontext);
194 }
195
/* One outstanding mmap cookie: (key, len) identifies the mapping at addr. */
struct iwch_mm_entry {
	TAILQ_ENTRY(iwch_mm_entry) entry;	/* linkage on ucontext->mmaps */
	u64 addr;				/* address to be mapped */
	u32 key;				/* lookup key; see remove_mmap() */
	unsigned len;				/* mapping length */
};
202
203 static __inline struct iwch_mm_entry *
204 remove_mmap(struct iwch_ucontext *ucontext,
205                                                 u32 key, unsigned len)
206 {
207         struct iwch_mm_entry *tmp, *mm;
208
209         mtx_lock(&ucontext->mmap_lock);
210         TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
211                 if (mm->key == key && mm->len == len) {
212                         TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
213                         mtx_unlock(&ucontext->mmap_lock);
214                         CTR4(KTR_IW_CXGB, "%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
215                              key, (unsigned long long) mm->addr, mm->len);
216                         return mm;
217                 }
218         }
219         mtx_unlock(&ucontext->mmap_lock);
220
221         return NULL;
222 }
223
224 static __inline void
225 insert_mmap(struct iwch_ucontext *ucontext,
226                                struct iwch_mm_entry *mm)
227 {
228         mtx_lock(&ucontext->mmap_lock);
229         CTR4(KTR_IW_CXGB, "%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
230              mm->key, (unsigned long long) mm->addr, mm->len);
231         TAILQ_INSERT_TAIL(&ucontext->mmaps, mm, entry);
232         mtx_unlock(&ucontext->mmap_lock);
233 }
234
/*
 * Bit mask selecting which iwch_qp_attributes fields iwch_modify_qp()
 * should apply.
 */
enum iwch_qp_attr_mask {
	IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
	IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	IWCH_QP_ATTR_MAX_ORD = 1 << 11,
	IWCH_QP_ATTR_MAX_IRD = 1 << 12,
	IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
	IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	/* Everything a consumer may legally modify (NEXT_STATE excluded). */
	IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
				     IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
				     IWCH_QP_ATTR_MAX_ORD |
				     IWCH_QP_ATTR_MAX_IRD |
				     IWCH_QP_ATTR_LLP_STREAM_HANDLE |
				     IWCH_QP_ATTR_STREAM_MSG_BUFFER |
				     IWCH_QP_ATTR_MPA_ATTR |
				     IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
};
255
/*
 * Modify the QP attributes selected by mask.  'internal' distinguishes
 * driver-initiated from consumer-initiated modifications.  Returns 0 or a
 * negative error.
 */
int iwch_modify_qp(struct iwch_dev *rhp,
				struct iwch_qp *qhp,
				enum iwch_qp_attr_mask mask,
				struct iwch_qp_attributes *attrs,
				int internal);
261
/* iWARP QP states; see iwch_convert_state() for the ib_qp_state mapping. */
enum iwch_qp_state {
	IWCH_QP_STATE_IDLE,
	IWCH_QP_STATE_RTS,
	IWCH_QP_STATE_ERROR,
	IWCH_QP_STATE_TERMINATE,
	IWCH_QP_STATE_CLOSING,
	IWCH_QP_STATE_TOT	/* number of states, not a real state */
};
270
271 static __inline int
272 iwch_convert_state(enum ib_qp_state ib_state)
273 {
274         switch (ib_state) {
275         case IB_QPS_RESET:
276         case IB_QPS_INIT:
277                 return IWCH_QP_STATE_IDLE;
278         case IB_QPS_RTS:
279                 return IWCH_QP_STATE_RTS;
280         case IB_QPS_SQD:
281                 return IWCH_QP_STATE_CLOSING;
282         case IB_QPS_SQE:
283                 return IWCH_QP_STATE_TERMINATE;
284         case IB_QPS_ERR:
285                 return IWCH_QP_STATE_ERROR;
286         default:
287                 return -1;
288         }
289 }
290
291 static __inline u32
292 iwch_ib_to_tpt_access(int acc)
293 {
294         return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
295                (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
296                (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
297                TPT_LOCAL_READ;
298 }
299
300 static __inline u32
301 iwch_ib_to_mwbind_access(int acc)
302 {
303         return (acc & IB_ACCESS_REMOTE_WRITE ? T3_MEM_ACCESS_REM_WRITE : 0) |
304                (acc & IB_ACCESS_REMOTE_READ ? T3_MEM_ACCESS_REM_READ : 0) |
305                (acc & IB_ACCESS_LOCAL_WRITE ? T3_MEM_ACCESS_LOCAL_WRITE : 0) |
306                T3_MEM_ACCESS_LOCAL_READ;
307 }
308
/* Validity state of a memory-management (STAG) entry. */
enum iwch_mmid_state {
	IWCH_STAG_STATE_VALID,
	IWCH_STAG_STATE_INVALID
};
313
/* Flags controlling QP query behavior. */
enum iwch_qp_query_flags {
	IWCH_QP_QUERY_CONTEXT_NONE = 0x0,	/* No ctx; Only attrs */
	IWCH_QP_QUERY_CONTEXT_GET = 0x1,	/* Get ctx + attrs */
	IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2,	/* Not Supported */

	/*
	 * Quiesce QP context; Consumer
	 * will NOT replay outstanding WR
	 */
	IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
	IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
	/* NOTE(review): 0x32 is not a single bit, unlike the other flags —
	 * possibly intended as 0x10 or 0x20; confirm against users before
	 * changing. */
	IWCH_QP_QUERY_TEST_USERWRITE = 0x32	/* Test special */
};
327
/* Post send work requests; on failure *bad_wr points at the offender. */
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
/* Post receive work requests; on failure *bad_wr points at the offender. */
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
/* Bind a memory window to a memory region via the given QP. */
int iwch_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind);
/* Poll up to num_entries completions into wc; returns the count polled. */
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
/* Post a TERMINATE message on the QP in response to rsp_msg. */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
/* Register / unregister the device with the verbs layer. */
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
/* Quiesce / resume all QPs associated with the CQ. */
int iwch_quiesce_qps(struct iwch_cq *chp);
int iwch_resume_qps(struct iwch_cq *chp);
void stop_read_rep_timer(struct iwch_qp *qhp);
/* Write a memory region's page list and TPT entry to the adapter. */
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
					struct iwch_mr *mhp,
					int shift,
					__be64 *page_list);
/* Re-register an existing memory region with a new page list. */
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
					struct iwch_mr *mhp,
					int shift,
					__be64 *page_list,
					int npages);
/*
 * Build a device page list from a physical buffer list, returning the
 * total size, page count, page shift and allocated page list.
 */
int build_phys_page_list(struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					u64 *iova_start,
					u64 *total_size,
					int *npages,
					int *shift,
					__be64 **page_list);
358
359
/* Human-readable adapter description string. */
#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"

#endif /* __IWCH_PROVIDER_H__ */