 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

#include <infiniband/endian.h>
#include <infiniband/driver.h>
#include <infiniband/udma_barrier.h>
#include <infiniband/verbs.h>
/* Number of physical ports a ConnectX-3 (mlx4) HCA exposes. */
#define MLX4_PORTS_NUM 2

enum {
	/* Bit offset of the static rate field in the address vector. */
	MLX4_STAT_RATE_OFFSET = 5
};
/* Two-level QP lookup table geometry: 2^8 top-level buckets. */
enum {
	MLX4_QP_TABLE_BITS = 8,
	MLX4_QP_TABLE_SIZE = 1 << MLX4_QP_TABLE_BITS,
	MLX4_QP_TABLE_MASK = MLX4_QP_TABLE_SIZE - 1
};
58 #define MLX4_REMOTE_SRQN_FLAGS(wr) htobe32(wr->qp_type.xrc.remote_srqn << 8)
/* XRC SRQ lookup table geometry: 2^8 top-level buckets. */
enum {
	MLX4_XSRQ_TABLE_BITS = 8,
	MLX4_XSRQ_TABLE_SIZE = 1 << MLX4_XSRQ_TABLE_BITS,
	MLX4_XSRQ_TABLE_MASK = MLX4_XSRQ_TABLE_SIZE - 1
};
66 struct mlx4_xsrq_table {
68 struct mlx4_srq **table;
70 } xsrq_table[MLX4_XSRQ_TABLE_SIZE];
72 pthread_mutex_t mutex;
79 MLX4_XRC_QPN_BIT = (1 << 23)
/*
 * Hardware opcodes: send-WQE opcodes, receive-completion opcodes, and
 * special CQE opcodes, as written into / read from WQE and CQE words.
 */
enum {
	MLX4_OPCODE_NOP			= 0x00,
	MLX4_OPCODE_SEND_INVAL		= 0x01,
	MLX4_OPCODE_RDMA_WRITE		= 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX4_OPCODE_SEND		= 0x0a,
	MLX4_OPCODE_SEND_IMM		= 0x0b,
	MLX4_OPCODE_LSO			= 0x0e,
	MLX4_OPCODE_RDMA_READ		= 0x10,
	MLX4_OPCODE_ATOMIC_CS		= 0x11,
	MLX4_OPCODE_ATOMIC_FA		= 0x12,
	MLX4_OPCODE_MASKED_ATOMIC_CS	= 0x14,
	MLX4_OPCODE_MASKED_ATOMIC_FA	= 0x15,
	MLX4_OPCODE_BIND_MW		= 0x18,
	MLX4_OPCODE_FMR			= 0x19,
	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX4_OPCODE_CONFIG_CMD		= 0x1f,

	/* Opcodes reported in receive completions. */
	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX4_RECV_OPCODE_SEND		= 0x01,
	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,

	/* Special CQE opcodes. */
	MLX4_CQE_OPCODE_ERROR		= 0x1e,
	MLX4_CQE_OPCODE_RESIZE		= 0x16,
};
116 struct verbs_device verbs_dev;
123 struct mlx4_context {
124 struct ibv_context ibv_ctx;
127 pthread_spinlock_t uar_lock;
132 pthread_spinlock_t bf_lock;
135 struct mlx4_qp **table;
137 } qp_table[MLX4_QP_TABLE_SIZE];
138 pthread_mutex_t qp_table_mutex;
145 struct mlx4_db_page *db_list[MLX4_NUM_DB_TYPE];
146 pthread_mutex_t db_list_mutex;
148 struct mlx4_xsrq_table xsrq_table;
152 enum ibv_port_cap_flags caps;
153 } port_query_cache[MLX4_PORTS_NUM];
156 uint8_t offset_valid;
158 void *hca_core_clock;
167 struct ibv_pd ibv_pd;
/* Per-CQ flag bits kept in the driver's CQ state. */
enum {
	MLX4_CQ_FLAGS_RX_CSUM_VALID	= 1 << 0,
	MLX4_CQ_FLAGS_EXTENDED		= 1 << 1,
	MLX4_CQ_FLAGS_SINGLE_THREADED	= 1 << 2,
};
178 struct ibv_cq_ex ibv_cq;
180 struct mlx4_buf resize_buf;
181 pthread_spinlock_t lock;
188 struct mlx4_qp *cur_qp;
189 struct mlx4_cqe *cqe;
194 struct verbs_srq verbs_srq;
196 pthread_spinlock_t lock;
211 pthread_spinlock_t lock;
222 struct verbs_qp verbs_qp;
227 uint32_t doorbell_qpn;
228 uint32_t sq_signal_bits;
236 uint32_t qp_cap_cache;
248 uint32_t sl_tclass_flowlabel;
253 struct ibv_ah ibv_ah;
/* Checksum-offload capability / validity bits. */
enum {
	MLX4_CSUM_SUPPORT_UD_OVER_IB	= (1 << 0),
	MLX4_CSUM_SUPPORT_RAW_OVER_ETH	= (1 << 1),
	/* Only report rx checksum when the validation is valid */
	MLX4_RX_CSUM_VALID		= (1 << 16),
};
/* Status bits reported in the CQE checksum/status word. */
enum mlx4_cqe_status {
	MLX4_CQE_STATUS_TCP_UDP_CSUM_OK	= (1 << 2),
	MLX4_CQE_STATUS_IPV4_PKT	= (1 << 22),
	MLX4_CQE_STATUS_IP_HDR_CSUM_OK	= (1 << 28),
	/* Packet is IPv4 and both IP-header and L4 checksums validated. */
	MLX4_CQE_STATUS_IPV4_CSUM_OK	= MLX4_CQE_STATUS_IPV4_PKT |
					  MLX4_CQE_STATUS_IP_HDR_CSUM_OK |
					  MLX4_CQE_STATUS_TCP_UDP_CSUM_OK
};
276 uint32_t vlan_my_qpn;
277 uint32_t immed_rss_invalid;
278 uint32_t g_mlpath_rqpn;
293 uint8_t owner_sr_opcode;
/*
 * Round val up to the nearest multiple of align.
 * align must be a power of two (the mask trick relies on it).
 */
static inline unsigned long align(unsigned long val, unsigned long align)
{
	return (val + align - 1) & ~(align - 1);
}

/* Round a requested queue size up to a supported (power-of-two) size. */
int align_queue_size(int req);
/*
 * Recover the enclosing struct mlx4_<type> from a pointer named ib<xxx>
 * to its embedded ibv_<xxx> member (a container_of specialized for this
 * header's naming convention).
 */
#define to_mxxx(xxx, type)						\
	((struct mlx4_##type *)						\
	 ((void *) ib##xxx - offsetof(struct mlx4_##type, ibv_##xxx)))
306 static inline struct mlx4_device *to_mdev(struct ibv_device *ibdev)
308 /* ibv_device is first field of verbs_device
309 * see try_driver() in libibverbs.
311 return container_of(ibdev, struct mlx4_device, verbs_dev);
314 static inline struct mlx4_context *to_mctx(struct ibv_context *ibctx)
316 return to_mxxx(ctx, context);
319 static inline struct mlx4_pd *to_mpd(struct ibv_pd *ibpd)
321 return to_mxxx(pd, pd);
324 static inline struct mlx4_cq *to_mcq(struct ibv_cq *ibcq)
326 return to_mxxx(cq, cq);
329 static inline struct mlx4_srq *to_msrq(struct ibv_srq *ibsrq)
331 return container_of(container_of(ibsrq, struct verbs_srq, srq),
332 struct mlx4_srq, verbs_srq);
335 static inline struct mlx4_qp *to_mqp(struct ibv_qp *ibqp)
337 return container_of(container_of(ibqp, struct verbs_qp, qp),
338 struct mlx4_qp, verbs_qp);
341 static inline struct mlx4_ah *to_mah(struct ibv_ah *ibah)
343 return to_mxxx(ah, ah);
346 static inline void mlx4_update_cons_index(struct mlx4_cq *cq)
348 *cq->set_ci_db = htobe32(cq->cons_index & 0xffffff);
351 int mlx4_alloc_buf(struct mlx4_buf *buf, size_t size, int page_size);
352 void mlx4_free_buf(struct mlx4_buf *buf);
354 uint32_t *mlx4_alloc_db(struct mlx4_context *context, enum mlx4_db_type type);
355 void mlx4_free_db(struct mlx4_context *context, enum mlx4_db_type type, uint32_t *db);
357 int mlx4_query_device(struct ibv_context *context,
358 struct ibv_device_attr *attr);
359 int mlx4_query_device_ex(struct ibv_context *context,
360 const struct ibv_query_device_ex_input *input,
361 struct ibv_device_attr_ex *attr,
363 int mlx4_query_port(struct ibv_context *context, uint8_t port,
364 struct ibv_port_attr *attr);
365 int mlx4_query_rt_values(struct ibv_context *context,
366 struct ibv_values_ex *values);
367 struct ibv_pd *mlx4_alloc_pd(struct ibv_context *context);
368 int mlx4_free_pd(struct ibv_pd *pd);
369 struct ibv_xrcd *mlx4_open_xrcd(struct ibv_context *context,
370 struct ibv_xrcd_init_attr *attr);
371 int mlx4_close_xrcd(struct ibv_xrcd *xrcd);
373 struct ibv_mr *mlx4_reg_mr(struct ibv_pd *pd, void *addr,
374 size_t length, int access);
375 int mlx4_rereg_mr(struct ibv_mr *mr, int flags, struct ibv_pd *pd,
376 void *addr, size_t length, int access);
377 int mlx4_dereg_mr(struct ibv_mr *mr);
379 struct ibv_mw *mlx4_alloc_mw(struct ibv_pd *pd, enum ibv_mw_type type);
380 int mlx4_dealloc_mw(struct ibv_mw *mw);
381 int mlx4_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
382 struct ibv_mw_bind *mw_bind);
384 struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe,
385 struct ibv_comp_channel *channel,
387 struct ibv_cq_ex *mlx4_create_cq_ex(struct ibv_context *context,
388 struct ibv_cq_init_attr_ex *cq_attr);
389 void mlx4_cq_fill_pfns(struct mlx4_cq *cq, const struct ibv_cq_init_attr_ex *cq_attr);
390 int mlx4_alloc_cq_buf(struct mlx4_device *dev, struct mlx4_buf *buf, int nent,
392 int mlx4_resize_cq(struct ibv_cq *cq, int cqe);
393 int mlx4_destroy_cq(struct ibv_cq *cq);
394 int mlx4_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
395 int mlx4_arm_cq(struct ibv_cq *cq, int solicited);
396 void mlx4_cq_event(struct ibv_cq *cq);
397 void __mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq);
398 void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq);
399 int mlx4_get_outstanding_cqes(struct mlx4_cq *cq);
400 void mlx4_cq_resize_copy_cqes(struct mlx4_cq *cq, void *buf, int new_cqe);
402 struct ibv_srq *mlx4_create_srq(struct ibv_pd *pd,
403 struct ibv_srq_init_attr *attr);
404 struct ibv_srq *mlx4_create_srq_ex(struct ibv_context *context,
405 struct ibv_srq_init_attr_ex *attr_ex);
406 struct ibv_srq *mlx4_create_xrc_srq(struct ibv_context *context,
407 struct ibv_srq_init_attr_ex *attr_ex);
408 int mlx4_modify_srq(struct ibv_srq *srq,
409 struct ibv_srq_attr *attr,
411 int mlx4_query_srq(struct ibv_srq *srq,
412 struct ibv_srq_attr *attr);
413 int mlx4_destroy_srq(struct ibv_srq *srq);
414 int mlx4_destroy_xrc_srq(struct ibv_srq *srq);
415 int mlx4_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
416 struct mlx4_srq *srq);
417 void mlx4_init_xsrq_table(struct mlx4_xsrq_table *xsrq_table, int size);
418 struct mlx4_srq *mlx4_find_xsrq(struct mlx4_xsrq_table *xsrq_table, uint32_t srqn);
419 int mlx4_store_xsrq(struct mlx4_xsrq_table *xsrq_table, uint32_t srqn,
420 struct mlx4_srq *srq);
421 void mlx4_clear_xsrq(struct mlx4_xsrq_table *xsrq_table, uint32_t srqn);
422 void mlx4_free_srq_wqe(struct mlx4_srq *srq, int ind);
423 int mlx4_post_srq_recv(struct ibv_srq *ibsrq,
424 struct ibv_recv_wr *wr,
425 struct ibv_recv_wr **bad_wr);
427 struct ibv_qp *mlx4_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr);
428 struct ibv_qp *mlx4_create_qp_ex(struct ibv_context *context,
429 struct ibv_qp_init_attr_ex *attr);
430 struct ibv_qp *mlx4_open_qp(struct ibv_context *context, struct ibv_qp_open_attr *attr);
431 int mlx4_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
433 struct ibv_qp_init_attr *init_attr);
434 int mlx4_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
436 int mlx4_destroy_qp(struct ibv_qp *qp);
437 void mlx4_init_qp_indices(struct mlx4_qp *qp);
438 void mlx4_qp_init_sq_ownership(struct mlx4_qp *qp);
439 int mlx4_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
440 struct ibv_send_wr **bad_wr);
441 int mlx4_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
442 struct ibv_recv_wr **bad_wr);
443 void mlx4_calc_sq_wqe_size(struct ibv_qp_cap *cap, enum ibv_qp_type type,
445 int mlx4_alloc_qp_buf(struct ibv_context *context, struct ibv_qp_cap *cap,
446 enum ibv_qp_type type, struct mlx4_qp *qp);
447 void mlx4_set_sq_sizes(struct mlx4_qp *qp, struct ibv_qp_cap *cap,
448 enum ibv_qp_type type);
449 struct mlx4_qp *mlx4_find_qp(struct mlx4_context *ctx, uint32_t qpn);
450 int mlx4_store_qp(struct mlx4_context *ctx, uint32_t qpn, struct mlx4_qp *qp);
451 void mlx4_clear_qp(struct mlx4_context *ctx, uint32_t qpn);
452 struct ibv_ah *mlx4_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr);
453 int mlx4_destroy_ah(struct ibv_ah *ah);
454 int mlx4_alloc_av(struct mlx4_pd *pd, struct ibv_ah_attr *attr,
456 void mlx4_free_av(struct mlx4_ah *ah);