/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H

#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>

#include <linux/wait.h>

#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)
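
/*
 * Illustrative sketch only (not part of this header): the MPT flags above
 * would typically be derived from the IB verbs access flags when an MPT
 * entry is built.  The helper name below is hypothetical; it simply ORs in
 * the matching MTHCA_MPT_FLAG_* bit for each IB_ACCESS_* bit that is set,
 * and always grants local read access.
 *
 *	static u32 example_convert_access(int acc)
 *	{
 *		return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
 *		       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
 *		       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
 *		       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
 *		       MTHCA_MPT_FLAG_LOCAL_READ;
 *	}
 */
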
struct mthca_buf_list {
	void *buf;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

union mthca_buf {
	struct mthca_buf_list direct;
	struct mthca_buf_list *page_list;
};

struct mthca_user_db_table;

struct mthca_ucontext {
	struct ib_ucontext          ibucontext;
	struct mthca_uar            uar;
	struct mthca_user_db_table *db_tab;
};

struct mthca_mtt *mtt;
struct ib_fmr_attr attr;
struct mthca_mtt *mtt;
struct mthca_mpt_entry __iomem *mpt;
struct mthca_mpt_entry *mpt;
dma_addr_t dma_handle;
struct mthca_mr ntmr;
struct mthca_dev *dev;
struct mthca_buf_list *page_list;
char irq_name[IB_DEVICE_NAME_MAX];
enum mthca_ah_type type;

/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table. Each
 * struct mthca_cq/qp also has its own lock. An individual qp lock
 * may be taken inside of an individual cq lock. Both cqs attached to
 * a qp may be locked, with the cq with the lower cqn locked first.
 * No other nesting should be done.
 *
 * Each struct mthca_cq/qp also has a ref count, protected by the
 * corresponding table lock. The pointer from the cq/qp_table to the
 * struct counts as one reference. This reference also is good for
 * access through the consumer API, so modifying the CQ/QP etc doesn't
 * need to take another reference. Access to a QP because of a
 * completion being polled does not need a reference either.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table
 * - remove pointer and decrement ref count
 * - unlock cq/qp_table lock
 * - wait_event until ref count is zero
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when a
 * QP is destroyed. Also, the consumer must make sure that calls to
 * qp_modify are serialized. Similarly, the consumer is responsible
 * for ensuring that no CQ resize operations are pending when a CQ
 * is destroyed.
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 * - split QP struct lock into three (one for common info, one for the
 *   send queue and one for the receive queue)
 */
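
/*
 * Illustrative sketch only (not the driver code): the access and destroy
 * protocol described above, written out against a hypothetical CQ table
 * with a spinlock and an idr-style lookup.  The names example_table,
 * example_lookup, example_remove, example_get_cq, example_put_cq and
 * example_destroy_cq are assumptions made up for this sketch; refcount,
 * cqn and wait stand for the fields the comment above refers to.
 *
 *	static struct mthca_cq *example_get_cq(struct example_table *t, u32 cqn)
 *	{
 *		struct mthca_cq *cq;
 *
 *		spin_lock(&t->lock);		// lock cq_table and look up struct
 *		cq = example_lookup(t, cqn);
 *		if (cq)
 *			++cq->refcount;		// increment ref count in struct
 *		spin_unlock(&t->lock);		// drop cq_table lock
 *		return cq;
 *	}
 *
 *	static void example_put_cq(struct example_table *t, struct mthca_cq *cq)
 *	{
 *		spin_lock(&t->lock);
 *		if (!--cq->refcount)		// decrement ref count; if zero,
 *			wake_up(&cq->wait);	// wake up waiters
 *		spin_unlock(&t->lock);
 *	}
 *
 *	static void example_destroy_cq(struct example_table *t, struct mthca_cq *cq)
 *	{
 *		spin_lock(&t->lock);		// lock cq_table
 *		example_remove(t, cq->cqn);	// remove pointer
 *		--cq->refcount;			// and drop the table's reference
 *		spin_unlock(&t->lock);		// unlock cq_table
 *		wait_event(cq->wait, !cq->refcount);	// sleep until refs are gone
 *	}
 */
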
struct mthca_cq_buf {
	union mthca_buf		queue;
	struct mthca_mr		mr;
	int			is_direct;
};

struct mthca_cq_resize {
	struct mthca_cq_buf	buf;
	int			cqe;
	int			state;
};

struct mthca_cq_buf buf;
struct mthca_cq_resize *resize_buf;
/* Next fields are Arbel only */
wait_queue_head_t wait;
u16 counter; /* Arbel only */
int db_index; /* Arbel only */
__be32 *db; /* Arbel only */
union mthca_buf queue;
wait_queue_head_t wait;
int db_index; /* Arbel only */
u8 port; /* for SQP and memfree use only */
u8 alt_port; /* for memfree use only */
enum ib_sig_type sq_policy;
union mthca_buf queue;
wait_queue_head_t wait;
struct ib_ud_header ud_header;
dma_addr_t header_dma;

static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}

static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
{
	return container_of(ibmr, struct mthca_fmr, ibmr);
}

static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mthca_mr, ibmr);
}

static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mthca_pd, ibpd);
}

static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mthca_ah, ibah);
}

static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mthca_cq, ibcq);
}

static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mthca_srq, ibsrq);
}

static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mthca_qp, ibqp);
}

static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
{
	return container_of(qp, struct mthca_sqp, qp);
}
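
/*
 * Usage sketch only (hypothetical function, not part of the driver): a
 * verbs entry point receives the generic ib_* object that is embedded in
 * the driver structure and uses the converters above to reach the
 * mthca-private data.
 *
 *	static void example_use_cq(struct ib_cq *ibcq)
 *	{
 *		struct mthca_cq *cq = to_mcq(ibcq);
 *
 *		// cq now points at the mthca_cq that embeds ibcq, so the
 *		// driver-private fields (buf, resize_buf, wait, ...) are
 *		// reachable from the generic verbs object.
 *	}
 */
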
#endif /* MTHCA_PROVIDER_H */