/*
 * Source: FreeBSD releng/9.2, sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.h
 * (stable/9 copied to releng/9.2 as part of the 9.2-RELEASE cycle).
 */
1 /**************************************************************************
2
3 Copyright (c) 2007, 2008 Chelsio Inc.
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Neither the name of the Chelsio Corporation nor the names of its
13     contributors may be used to endorse or promote products derived from
14     this software without specific prior written permission.
15
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27
28 $FreeBSD$
29
30 ***************************************************************************/
31 #ifndef __IWCH_PROVIDER_H__
32 #define __IWCH_PROVIDER_H__
33
34 #include <rdma/ib_verbs.h>
35
/*
 * Software state for a protection domain (PD): the generic verbs PD
 * object plus the driver-side PD id and owning-device back-pointer.
 */
struct iwch_pd {
        struct ib_pd ibpd;      /* embedded verbs PD; see to_iwch_pd() */
        u32 pdid;               /* PD id (presumably adapter-assigned — confirm in iw_cxgb_provider.c) */
        struct iwch_dev *rhp;   /* back-pointer to the owning iwch device */
};
41
/*
 * Fallback definition of container_of(): recover a pointer to the
 * enclosing structure 'stype' from a pointer 'p' to its member 'field'.
 * Only defined if the build environment does not already provide one.
 */
#ifndef container_of
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
#endif
45 static __inline struct iwch_pd *
46 to_iwch_pd(struct ib_pd *ibpd)
47 {
48         return container_of(ibpd, struct iwch_pd, ibpd);
49 }
50
/*
 * Software view of a TPT (translation and protection table) entry,
 * describing a memory region/window registration.
 */
struct tpt_attributes {
        u32 stag;               /* steering tag identifying this entry */
        u32 state:1;            /* presumably enum iwch_mmid_state (valid/invalid) — confirm */
        u32 type:2;
        u32 rsvd:1;
        enum tpt_mem_perm perms;        /* access permissions */
        /*
         * NOTE(review): "invaliate" is a historical typo for
         * "invalidate"; the name is kept to avoid breaking callers.
         */
        u32 remote_invaliate_disable:1;
        u32 zbva:1;             /* zero-based virtual addressing */
        u32 mw_bind_enable:1;   /* memory-window binds allowed on this region */
        u32 page_size:5;

        u32 pdid;               /* owning PD id */
        u32 qpid;               /* associated QP id, if any */
        u32 pbl_addr;           /* physical buffer list (PBL) address */
        u32 len;                /* region length */
        u64 va_fbo;             /* likely VA or first-byte offset, per name — confirm */
        u32 pbl_size;           /* physical buffer list size */
};
69
/* Software state for a memory region (MR). */
struct iwch_mr {
        struct ib_mr ibmr;      /* embedded verbs MR; see to_iwch_mr() */
        struct ib_umem *umem;   /* pinned user memory (presumably NULL for kernel MRs — confirm) */
        struct iwch_dev *rhp;   /* back-pointer to the owning iwch device */
        u64 kva;                /* kernel virtual address of the region, per name — confirm */
        struct tpt_attributes attr;     /* TPT entry attributes for this MR */
};
77
/* Opaque handle alias for a memory window. */
typedef struct iwch_mw iwch_mw_handle;
79
80 static __inline struct iwch_mr *
81 to_iwch_mr(struct ib_mr *ibmr)
82 {
83         return container_of(ibmr, struct iwch_mr, ibmr);
84 }
85
/* Software state for a memory window (MW). */
struct iwch_mw {
        struct ib_mw ibmw;      /* embedded verbs MW; see to_iwch_mw() */
        struct iwch_dev *rhp;   /* back-pointer to the owning iwch device */
        u64 kva;                /* kernel virtual address, per name — confirm */
        struct tpt_attributes attr;     /* TPT entry attributes for this MW */
};
92
93 static __inline struct iwch_mw *
94 to_iwch_mw(struct ib_mw *ibmw)
95 {
96         return container_of(ibmw, struct iwch_mw, ibmw);
97 }
98
/* Software state for a completion queue (CQ). */
struct iwch_cq {
        struct ib_cq ibcq;      /* embedded verbs CQ; see to_iwch_cq() */
        struct iwch_dev *rhp;   /* back-pointer to the owning iwch device */
        struct t3_cq cq;        /* low-level (cxio) CQ state */
        struct mtx lock;        /* serializes access to this CQ */
        int refcnt;             /* reference count */
        u32 /* __user */ *user_rptr_addr;       /* user-space read-pointer location, for user CQs */
};
107
108 static __inline struct iwch_cq *
109 to_iwch_cq(struct ib_cq *ibcq)
110 {
111         return container_of(ibcq, struct iwch_cq, ibcq);
112 }
113
/* QP flag bits (see struct iwch_qp::flags). */
enum IWCH_QP_FLAGS {
        QP_QUIESCED = 0x01      /* QP has been quiesced; see qp_quiesced() */
};
117
/* Negotiated MPA (Marker PDU Aligned framing) connection attributes. */
struct iwch_mpa_attributes {
        u8 initiator;           /* nonzero if this side initiated the connection */
        u8 recv_marker_enabled; /* expect MPA markers on receive */
        u8 xmit_marker_enabled; /* insert MPA markers on transmit */
        u8 crc_enabled;         /* MPA CRC in use */
        u8 version;     /* 0 or 1 */
};
125
/*
 * QP attributes as read/modified through iwch_modify_qp(); the mask
 * argument there (enum iwch_qp_attr_mask) selects which members apply.
 */
struct iwch_qp_attributes {
        u32 scq;                /* send CQ id */
        u32 rcq;                /* receive CQ id */
        u32 sq_num_entries;     /* send queue depth */
        u32 rq_num_entries;     /* receive queue depth */
        u32 sq_max_sges;        /* max SGEs per send WR */
        u32 sq_max_sges_rdma_write;     /* max SGEs per RDMA write WR */
        u32 rq_max_sges;        /* max SGEs per receive WR */
        u32 state;              /* current state; presumably enum iwch_qp_state — confirm */
        u8 enable_rdma_read;
        u8 enable_rdma_write;   /* enable inbound Read Resp. */
        u8 enable_bind;
        u8 enable_mmid0_fastreg;        /* Enable STAG0 + Fast-register */
        u32 max_ord;            /* max outstanding RDMA reads (ORD) */
        u32 max_ird;            /* max inbound RDMA reads (IRD) */
        u32 pd; /* IN */
        /*
         * Next QP state.  If the current state is specified, only the
         * QP attributes will be modified.
         */
        u32 next_state;
        char terminate_buffer[52];      /* TERMINATE message payload */
        u32 terminate_msg_len;
        u8 is_terminate_local;
        struct iwch_mpa_attributes mpa_attr;    /* IN-OUT */
        struct iwch_ep *llp_stream_handle;      /* LLP (stream/TCP) endpoint */
        char *stream_msg_buf;   /* Last stream msg. before Idle -> RTS */
        u32 stream_msg_buf_len; /* Only on Idle -> RTS */
};
155
/* Software state for a queue pair (QP). */
struct iwch_qp {
        struct ib_qp ibqp;      /* embedded verbs QP; see to_iwch_qp() */
        struct iwch_dev *rhp;   /* back-pointer to the owning iwch device */
        struct iwch_ep *ep;     /* associated connection endpoint */
        struct iwch_qp_attributes attr; /* current/requested attributes */
        struct t3_wq wq;        /* low-level (cxio) work queue state */
        struct mtx lock;        /* serializes access to this QP */
        int refcnt;             /* reference count; see iwch_qp_add_ref/rem_ref */
        enum IWCH_QP_FLAGS flags;       /* e.g. QP_QUIESCED */
        struct callout timer;   /* QP timer; see stop_read_rep_timer() */
};
167
168 static __inline int
169 qp_quiesced(struct iwch_qp *qhp)
170 {
171         return qhp->flags & QP_QUIESCED;
172 }
173
174 static __inline struct iwch_qp *
175 to_iwch_qp(struct ib_qp *ibqp)
176 {
177         return container_of(ibqp, struct iwch_qp, ibqp);
178 }
179
/* Take/drop a reference on the QP (manipulates iwch_qp::refcnt). */
void iwch_qp_add_ref(struct ib_qp *qp);
void iwch_qp_rem_ref(struct ib_qp *qp);
182
/*
 * Per-process (user context) state: the verbs ucontext, the low-level
 * cxio context, and the bookkeeping list of pending mmap entries.
 */
struct iwch_ucontext {
        struct ib_ucontext ibucontext;  /* embedded verbs context; see to_iwch_ucontext() */
        struct cxio_ucontext uctx;      /* low-level (cxio) context */
        u32 key;                /* key generator for mmap entries — confirm usage in caller */
        struct mtx mmap_lock;   /* protects the mmaps list */
        TAILQ_HEAD( ,iwch_mm_entry) mmaps;      /* pending mmap entries; see insert_mmap/remove_mmap */
};
190
191 static __inline struct iwch_ucontext *
192 to_iwch_ucontext(struct ib_ucontext *c)
193 {
194         return container_of(c, struct iwch_ucontext, ibucontext);
195 }
196
/* One pending mmap bookkeeping record, keyed by (key, len). */
struct iwch_mm_entry {
        TAILQ_ENTRY(iwch_mm_entry) entry;       /* linkage on iwch_ucontext::mmaps */
        u64 addr;               /* address to be mapped */
        u32 key;                /* lookup key; see remove_mmap() */
        unsigned len;           /* mapping length */
};
203
204 static __inline struct iwch_mm_entry *
205 remove_mmap(struct iwch_ucontext *ucontext,
206                                                 u32 key, unsigned len)
207 {
208         struct iwch_mm_entry *tmp, *mm;
209
210         mtx_lock(&ucontext->mmap_lock);
211         TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
212                 if (mm->key == key && mm->len == len) {
213                         TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
214                         mtx_unlock(&ucontext->mmap_lock);
215                         CTR4(KTR_IW_CXGB, "%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
216                              key, (unsigned long long) mm->addr, mm->len);
217                         return mm;
218                 }
219         }
220         mtx_unlock(&ucontext->mmap_lock);
221
222         return NULL;
223 }
224
225 static __inline void
226 insert_mmap(struct iwch_ucontext *ucontext,
227                                struct iwch_mm_entry *mm)
228 {
229         mtx_lock(&ucontext->mmap_lock);
230         CTR4(KTR_IW_CXGB, "%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
231              mm->key, (unsigned long long) mm->addr, mm->len);
232         TAILQ_INSERT_TAIL(&ucontext->mmaps, mm, entry);
233         mtx_unlock(&ucontext->mmap_lock);
234 }
235
/*
 * Bit mask selecting which members of struct iwch_qp_attributes an
 * iwch_modify_qp() call should apply.
 */
enum iwch_qp_attr_mask {
        IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
        IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
        IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
        IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
        IWCH_QP_ATTR_MAX_ORD = 1 << 11,
        IWCH_QP_ATTR_MAX_IRD = 1 << 12,
        IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
        IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
        IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
        IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
        /* Union of all attributes that may be modified together. */
        IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
                                     IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
                                     IWCH_QP_ATTR_MAX_ORD |
                                     IWCH_QP_ATTR_MAX_IRD |
                                     IWCH_QP_ATTR_LLP_STREAM_HANDLE |
                                     IWCH_QP_ATTR_STREAM_MSG_BUFFER |
                                     IWCH_QP_ATTR_MPA_ATTR |
                                     IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
};
256
/*
 * Modify QP state/attributes.  'mask' selects which members of 'attrs'
 * to apply; 'internal' presumably marks driver-initiated transitions —
 * confirm semantics in the implementation.
 */
int iwch_modify_qp(struct iwch_dev *rhp,
				struct iwch_qp *qhp,
				enum iwch_qp_attr_mask mask,
				struct iwch_qp_attributes *attrs,
				int internal);
262
/* Driver-internal QP states; see iwch_convert_state() for the ib mapping. */
enum iwch_qp_state {
        IWCH_QP_STATE_IDLE,
        IWCH_QP_STATE_RTS,
        IWCH_QP_STATE_ERROR,
        IWCH_QP_STATE_TERMINATE,
        IWCH_QP_STATE_CLOSING,
        IWCH_QP_STATE_TOT       /* number of states (sentinel) */
};
271
272 static __inline int
273 iwch_convert_state(enum ib_qp_state ib_state)
274 {
275         switch (ib_state) {
276         case IB_QPS_RESET:
277         case IB_QPS_INIT:
278                 return IWCH_QP_STATE_IDLE;
279         case IB_QPS_RTS:
280                 return IWCH_QP_STATE_RTS;
281         case IB_QPS_SQD:
282                 return IWCH_QP_STATE_CLOSING;
283         case IB_QPS_SQE:
284                 return IWCH_QP_STATE_TERMINATE;
285         case IB_QPS_ERR:
286                 return IWCH_QP_STATE_ERROR;
287         default:
288                 return -1;
289         }
290 }
291
292 static __inline u32
293 iwch_ib_to_tpt_access(int acc)
294 {
295         return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
296                (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
297                (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
298                TPT_LOCAL_READ;
299 }
300
301 static __inline u32
302 iwch_ib_to_mwbind_access(int acc)
303 {
304         return (acc & IB_ACCESS_REMOTE_WRITE ? T3_MEM_ACCESS_REM_WRITE : 0) |
305                (acc & IB_ACCESS_REMOTE_READ ? T3_MEM_ACCESS_REM_READ : 0) |
306                (acc & IB_ACCESS_LOCAL_WRITE ? T3_MEM_ACCESS_LOCAL_WRITE : 0) |
307                T3_MEM_ACCESS_LOCAL_READ;
308 }
309
/* Validity state of a memory-management id (STAG). */
enum iwch_mmid_state {
        IWCH_STAG_STATE_VALID,
        IWCH_STAG_STATE_INVALID
};
314
/* Flags controlling how a QP query retrieves or affects the context. */
enum iwch_qp_query_flags {
        IWCH_QP_QUERY_CONTEXT_NONE = 0x0,       /* No ctx; Only attrs */
        IWCH_QP_QUERY_CONTEXT_GET = 0x1,        /* Get ctx + attrs */
        IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2,    /* Not Supported */

        /*
         * Quiesce QP context; Consumer
         * will NOT replay outstanding WR
         */
        IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
        IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
        /*
         * NOTE(review): 0x32 is not a single bit, unlike the other
         * flags above — looks intentional ("Test special") but confirm.
         */
        IWCH_QP_QUERY_TEST_USERWRITE = 0x32     /* Test special */
};
328
/*
 * Verbs entry points and memory-registration helpers implemented in
 * the iw_cxgb source files.
 */
/* Post send WRs; on failure *bad_wr points at the offending WR. */
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                      struct ib_send_wr **bad_wr);
/* Post receive WRs; on failure *bad_wr points at the offending WR. */
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr);
/* Bind a memory window per mw_bind. */
int iwch_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind);
/* Poll up to num_entries completions from ibcq into wc (verbs poll_cq). */
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
/* Post a TERMINATE message in response to rsp_msg. */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
/* Register/unregister this adapter with the verbs core. */
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
/* Stop the QP's timer (see iwch_qp::timer). */
void stop_read_rep_timer(struct iwch_qp *qhp);
/* Register/re-register a memory region; shift is presumably log2 page size — confirm. */
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
					struct iwch_mr *mhp,
					int shift);
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
					struct iwch_mr *mhp,
					int shift,
					int npages);
/* Allocate/free/write the physical buffer list (PBL) backing an MR. */
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
void iwch_free_pbl(struct iwch_mr *mhp);
int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
/* Build a page list (and derived sizes) from a physical buffer array. */
int build_phys_page_list(struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					u64 *iova_start,
					u64 *total_size,
					int *npages,
					int *shift,
					__be64 **page_list);
358
359
/* Human-readable node description string for this driver. */
#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"

#endif /* __IWCH_PROVIDER_H__ */