/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;
/* IB values map to NodeInfo:NodeType. */

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};
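/*
 * Illustrative sketch, not part of the original header: mapping a link
 * layer value to a printable name, e.g. for logging. The helper name is
 * hypothetical.
 */
static inline const char *ib_example_link_layer_str(enum rdma_link_layer ll)
{
	switch (ll) {
	case IB_LINK_LAYER_INFINIBAND:
		return "InfiniBand";
	case IB_LINK_LAYER_ETHERNET:
		return "Ethernet";
	default:
		return "Unspecified";
	}
}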
enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksums on outgoing UD IPoIB
	 * messages and can verify the validity of checksums on
	 * incoming messages. Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_XRC			= (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
	IB_DEVICE_MR_ALLOCATE		= (1<<23),
	IB_DEVICE_SHARED_MR		= (1<<24),
	IB_DEVICE_QPG			= (1<<25),
	IB_DEVICE_UD_RSS		= (1<<26),
	IB_DEVICE_UD_TSS		= (1<<27),
	IB_DEVICE_CROSS_CHANNEL		= (1<<28),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
	/*
	 * Devices can set either IB_DEVICE_MEM_WINDOW_TYPE_2A or
	 * IB_DEVICE_MEM_WINDOW_TYPE_2B if they support type 2A or type 2B
	 * memory windows. A device can set neither to indicate it doesn't
	 * support type 2 windows at all.
	 */
	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1<<30),
	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1<<31),
	IB_DEVICE_SIGNATURE_HANDOVER	= (1LL<<32)
};
enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};
enum ib_cq_create_flags {
	IB_CQ_CREATE_CROSS_CHANNEL	= 1 << 0,
	IB_CQ_TIMESTAMP			= 1 << 1,
	IB_CQ_TIMESTAMP_TO_SYS_TIME	= 1 << 2
};
struct ib_device_attr {
	__be64			sys_image_guid;
	u64			device_cap_flags;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	unsigned int		max_fast_reg_page_list_len;
	u8			local_ca_ack_delay;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock;
	unsigned int		sig_prot_cap;
	unsigned int		sig_guard_cap;
};

enum ib_device_attr_comp_mask {
	IB_DEVICE_ATTR_WITH_TIMESTAMP_MASK = 1ULL << 1,
	IB_DEVICE_ATTR_WITH_HCA_CORE_CLOCK = 1ULL << 2
};
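/*
 * Illustrative sketch, not part of the original header: consumers usually
 * fill a struct ib_device_attr via ib_query_device() (declared later in
 * this file) and then test individual capability bits before relying on
 * an optional feature. The helper name is hypothetical.
 */
static inline int ib_example_attr_has_cap(const struct ib_device_attr *attr,
					  u64 cap_flag)
{
	return (attr->device_cap_flags & cap_flag) != 0;
}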
enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5,
	IB_PORT_DUMMY		= -1	/* force enum signed */
};
enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};
static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}
struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64	ipInTooBigErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipOutForwDatagrams;
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};
struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};
enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};
enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};
enum ib_mr_create_flags {
	IB_MR_SIGNATURE_EN = 1,
};

/**
 * ib_mr_init_attr - Memory region init attributes passed to routine
 *     ib_create_mr.
 * @max_reg_descriptors: max number of registration descriptors that
 *     may be used with registration work requests.
 * @flags: MR creation flags bit mask.
 */
struct ib_mr_init_attr {
	int	max_reg_descriptors;
	u32	flags;
};
/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;
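/*
 * Illustrative sketch, not part of the original header: the two helpers
 * above express the same rate in different units. For IB_RATE_40_GBPS,
 * ib_rate_to_mult() returns 16 (16 * 2.5 Gbit/sec) and ib_rate_to_mbps()
 * returns 40000. The helper name is hypothetical.
 */
static inline void ib_example_print_rate(enum ib_rate rate)
{
	pr_info("rate: %d x 2.5 Gbit/sec = %d Mbit/sec\n",
		ib_rate_to_mult(rate), ib_rate_to_mbps(rate));
}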
struct ib_cq_init_attr {
	unsigned int	cqe;
	int		comp_vector;
	u32		flags;
};

enum ib_signature_type {
	IB_SIG_TYPE_T10_DIF
};

/*
 * T10-DIF Signature types
 * T10-DIF types are defined by SCSI
 */
enum ib_t10_dif_type {
	IB_T10DIF_NONE,
	IB_T10DIF_TYPE1,
	IB_T10DIF_TYPE2,
	IB_T10DIF_TYPE3
};

/*
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};
/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @type: T10-DIF type (0|1|2|3)
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @type3_inc_reftag: T10-DIF type 3 does not mandate handling of the
 *     reference tag; it is the user's choice whether to increment it.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_type	type;
	enum ib_t10_dif_bg_type	bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			type3_inc_reftag;
};
/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};
enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32			fail_status;
	struct ib_sig_err	sig_err;
};
/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 *     enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
	u8			dmac[ETH_ALEN];
	u16			vlan_id;
};
enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SL		= (1<<4),
	IB_WC_WITH_SLID		= (1<<5),
	IB_WC_WITH_TIMESTAMP	= (1<<6),
	IB_WC_WITH_SMAC		= (1<<7),
	IB_WC_WITH_VLAN		= (1<<8),
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
	uint64_t		timestamp;	/* timestamp = 0 indicates error */
};
enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};
struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
		} xrc;
	} ext;
};
struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
	u32	qpg_tss_mask_sz;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,

	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer.
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};
enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL		= 1 << 2,
	IB_QP_CREATE_MANAGED_SEND		= 1 << 3,
	IB_QP_CREATE_MANAGED_RECV		= 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

enum ib_qpg_type {
	IB_QPG_NONE	= 0,
	IB_QPG_PARENT	= (1<<0),
	IB_QPG_CHILD_RX = (1<<1),
	IB_QPG_CHILD_TX = (1<<2)
};

struct ib_qpg_init_attrib {
	u32	tss_child_count;
	u32	rss_child_count;
};
struct ib_qp_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;	  /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	union {
		struct ib_qp *qpg_parent; /* see qpg_type */
		struct ib_qpg_init_attrib parent_attrib;
	};
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;
	enum ib_qpg_type	qpg_type;
	u8			port_num; /* special QP types only */
};
enum {
	IB_DCT_CREATE_FLAG_RCV_INLINE		= 1 << 0,
	IB_DCT_CREATE_FLAGS_MASK		= IB_DCT_CREATE_FLAG_RCV_INLINE,
};

struct ib_dct_init_attr {

struct ib_qp_open_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};
enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_GROUP_RSS			= (1<<21),
	IB_QP_DC_KEY			= (1<<22),
	IB_QP_SMAC			= (1<<23),
	IB_QP_ALT_SMAC			= (1<<24),
	IB_QP_VID			= (1<<25),
	IB_QP_ALT_VID			= (1<<26)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR,
	IB_QPS_DUMMY = -1	/* force enum signed */
};
enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u8			smac[ETH_ALEN];
	u8			alt_smac[ETH_ALEN];
	u16			vlan_id;
	u16			alt_vlan_id;
};
struct ib_qp_attr_ex {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_FAST_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};
enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
	IB_SEND_UMR_UNREG	= (1<<5)
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};
struct ib_fast_reg_page_list {
	struct ib_device       *device;
	u64		       *page_list;
	unsigned int		max_page_list_len;
};

/**
 * struct ib_mw_bind_info - Parameters for a memory window bind operation.
 * @mr: A memory region to bind the memory window to.
 * @addr: The address where the memory window should begin.
 * @length: The length of the memory window, in bytes.
 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
 *
 * This struct contains the shared parameters for type 1 and type 2
 * memory window bind operations.
 */
struct ib_mw_bind_info {
	struct ib_mr   *mr;
	u64		addr;
	u64		length;
	int		mw_access_flags;
};
struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u64	compare_add_mask;
			u64	swap_mask;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			void   *header;
			int	hlen;
			int	mss;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
		struct {
			u64				iova_start;
			struct ib_fast_reg_page_list   *page_list;
			unsigned int			page_shift;
			unsigned int			page_list_len;
			u32				length;
			int				access_flags;
			u32				rkey;
		} fast_reg;
		struct {
			struct ib_mw	       *mw;
			/* The new rkey for the memory window. */
			u32			rkey;
			struct ib_mw_bind_info	bind_info;
		} bind_mw;
		struct {
			struct ib_sig_attrs    *sig_attrs;
			struct ib_mr	       *sig_mr;
			int			access_flags;
			struct ib_sge	       *prot;
		} sig_handover;
	} wr;
	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
};
struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ACCESS_ALLOCATE_MR	= (1<<5),
	IB_ZERO_BASED		= (1<<13)
};
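/*
 * Illustrative sketch, not part of the original header: a typical
 * access-flag combination for a buffer that a remote peer will RDMA-write
 * into; remote write access requires local write access as well. The
 * constant name is hypothetical.
 */
enum {
	IB_EXAMPLE_REMOTE_WRITE_ACCESS = IB_ACCESS_LOCAL_WRITE |
					 IB_ACCESS_REMOTE_WRITE,
};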
struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd   *pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

/**
 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
 * @wr_id: Work request id.
 * @send_flags: Flags from ib_send_flags enum.
 * @bind_info: More parameters of the bind operation.
 */
struct ib_mw_bind {
	u64			wr_id;
	int			send_flags;
	struct ib_mw_bind_info	bind_info;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};
struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	struct list_head	xrcd_list;
	struct list_head	rule_list;
	struct list_head	dct_list;
	int			closing;
	void		       *peer_mem_private_data;
	char		       *peer_mem_name;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};
struct ib_udata_ops {
	int	(*copy_from)(void *dest, struct ib_udata *udata,
			     size_t len);
	int	(*copy_to)(struct ib_udata *udata, void *src,
			   size_t len);
};

struct ib_udata {
	struct ib_udata_ops    *ops;
	void __user	       *inbuf;
	void __user	       *outbuf;
	size_t			inlen;
	size_t			outlen;
};
struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
};
enum ib_cq_attr_mask {
	IB_CQ_MODERATION	= (1 << 0),
	IB_CQ_CAP_FLAGS		= (1 << 1)
};

enum ib_cq_cap_flags {
	IB_CQ_IGNORE_OVERRUN	= (1 << 0)
};

struct ib_cq_attr {
	struct {
		u16	cq_count;
		u16	cq_period;
	} moderation;
	u32	cq_cap_flags;
};
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler		comp_handler;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *cq_context;
	int			cqe;
	atomic_t		usecnt; /* count number of work queues */
};
struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
			u32		srq_num;
		} xrc;
	} ext;
};
struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;	/* XRC TGT QPs only */
	struct list_head	xrcd_list;
	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp	       *real_qp;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
	enum ib_qpg_type	qpg_type;
};
struct ib_dct {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
	struct ib_srq	       *srq;
	u32			dct_num;
};

struct ib_mr {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	u32			lkey;
	u32			rkey;
	atomic_t		usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	u32			rkey;
	enum ib_mw_type		type;
};

struct ib_fmr {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};
/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};
/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers */
	IB_FLOW_SPEC_ETH	= 0x20,
	IB_FLOW_SPEC_IB		= 0x21,
	/* L3 header */
	IB_FLOW_SPEC_IPV4	= 0x30,
	/* L4 headers */
	IB_FLOW_SPEC_TCP	= 0x40,
	IB_FLOW_SPEC_UDP	= 0x41
};

#define IB_FLOW_SPEC_SUPPORT_LAYERS 4

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK = 1
};
struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
};

struct ib_flow_spec_eth {
	enum ib_flow_spec_type	  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16	dlid;
	__u8	sl;
};

struct ib_flow_spec_ib {
	enum ib_flow_spec_type	 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
};

struct ib_flow_spec_ipv4 {
	enum ib_flow_spec_type	   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
};

struct ib_flow_spec_tcp_udp {
	enum ib_flow_spec_type	      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};
union ib_flow_spec {
	struct {
		enum ib_flow_spec_type	type;
		u16			size;
	};
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ipv4	ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;
	u16	     priority;
	u32	     flags;
	u8	     num_of_specs;
	u8	     port;
	/* Following are the optional layers according to user request
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 */
};

struct ib_flow {
	struct ib_qp	       *qp;
	struct ib_uobject      *uobject;
};
enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};
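/*
 * Illustrative sketch, not part of the original header: how a process_mad
 * return value is typically interpreted by the dispatcher. The helper
 * name is hypothetical.
 */
static inline int ib_example_mad_needs_reply(int mad_result)
{
	return (mad_result & IB_MAD_RESULT_SUCCESS) &&
	       (mad_result & IB_MAD_RESULT_REPLY);
}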
#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t		lock;
	struct ib_event_handler	event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8		       *lmc_cache;
};

enum verbs_values_mask {
	IBV_VALUES_HW_CLOCK	= 1 << 0
};

struct ib_device_values {
	int		values_mask;
	uint64_t	hwclock;
};
struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	u64		(*dma_address)(struct ib_device *dev,
				       struct scatterlist *sg);
	unsigned int	(*dma_len)(struct ib_device *dev,
				   struct scatterlist *sg);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void	       *(*alloc_coherent)(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};
struct ib_exp_device_attr;
struct ib_exp_qp_init_attr;
struct ib_device {
	struct device		       *dma_device;

	char				name[IB_DEVICE_NAME_MAX];

	struct list_head		event_handler_list;
	spinlock_t			event_handler_lock;

	spinlock_t			client_data_lock;
	struct list_head		core_list;
	struct list_head		client_data_list;

	struct ib_cache			cache;
	int			       *pkey_tbl_len;
	int			       *gid_tbl_len;

	int				num_comp_vectors;

	struct iw_cm_verbs	       *iwcm;
	int		           (*get_protocol_stats)(struct ib_device *device,
							 union rdma_protocol_stats *stats);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *	   (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device,
						struct ib_cq_init_attr *attr,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq,
						struct ib_cq_attr *cq_attr,
						int cq_attr_mask);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata,
						  int mr_id);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	int                        (*destroy_mr)(struct ib_mr *mr);
	struct ib_mr *             (*create_mr)(struct ib_pd *pd,
						struct ib_mr_init_attr *mr_init_attr);
	struct ib_mr *             (*alloc_fast_reg_mr)(struct ib_pd *pd,
							int max_page_list_len);
	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
								   int page_list_len);
	void                       (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int                        (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int                        (*unmap_fmr)(struct list_head *fmr_list);
	int                        (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr *flow_attr,
						  int domain);
	int			   (*destroy_flow)(struct ib_flow *flow_id);
	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						      struct ib_mr_status *mr_status);

	unsigned long		   (*get_unmapped_area)(struct file *file,
							unsigned long addr,
							unsigned long len, unsigned long pgoff,
							unsigned long flags);
	int                        (*ioctl)(struct ib_ucontext *context,
					    unsigned int cmd,
					    unsigned long arg);
	int                        (*query_values)(struct ib_device *device,
						   int q_values,
						   struct ib_device_values *values);
	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct kobject              *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[64];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u8			     node_type;
	u8			     phys_port_cnt;

	spinlock_t		     cmd_perf_lock;

	/*
	 * Experimental data and functions
	 */
	int			   (*exp_query_device)(struct ib_device *device,
						       struct ib_exp_device_attr *device_attr);
	struct ib_qp *		   (*exp_create_qp)(struct ib_pd *pd,
						    struct ib_exp_qp_init_attr *qp_init_attr,
						    struct ib_udata *udata);
	struct ib_dct *		   (*exp_create_dct)(struct ib_pd *pd,
						     struct ib_dct_init_attr *attr,
						     struct ib_udata *udata);
	int			   (*exp_destroy_dct)(struct ib_dct *dct);
	int			   (*exp_query_dct)(struct ib_dct *dct, struct ib_dct_attr *attr);

	u64			     uverbs_exp_cmd_mask;
};
struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};
struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client  (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return udata->ops->copy_from(dest, udata, len);
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return udata->ops->copy_to(udata, src, len);
}
/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of port
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll);
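/*
 * Illustrative sketch, not part of the original header: a driver's
 * modify_qp method would validate a requested INIT->RTR transition
 * before touching hardware. The helper name is hypothetical.
 */
static inline int ib_example_validate_init_to_rtr(struct ib_qp *qp,
						  enum ib_qp_attr_mask mask,
						  enum rdma_link_layer ll)
{
	if (!ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, qp->qp_type,
				mask, ll))
		return -EINVAL;
	return 0;
}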
int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);
/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);
/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);
/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);
/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);
/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}
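/*
 * Illustrative sketch, not part of the original header: posting a single,
 * already DMA-mapped receive buffer to an SRQ. The lkey comes from the MR
 * covering the buffer; the helper name is hypothetical.
 */
static inline int ib_example_post_one_srq_recv(struct ib_srq *srq,
					       u64 dma_addr, u32 len,
					       u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id	 = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}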
/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);
/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
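/*
 * Illustrative sketch, not part of the original header: posting a single
 * signaled SEND of one registered buffer. The helper name is hypothetical.
 */
static inline int ib_example_post_one_send(struct ib_qp *qp,
					   u64 dma_addr, u32 len,
					   u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_send_wr wr = {
		.wr_id	    = wr_id,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode	    = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}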
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *     Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies the attributes for the specified CQ and then
 *   transitions the CQ to the given state.
 * @cq: The CQ to modify.
 * @cq_attr: specifies the CQ attributes to modify.
 * @cq_attr_mask: A bit-mask used to specify which attributes of the CQ
 *   are being modified.
 */
int ib_modify_cq(struct ib_cq *cq,
		 struct ib_cq_attr *cq_attr,
		 int cq_attr_mask);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);
/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}
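/*
 * Illustrative sketch, not part of the original header: draining a CQ and
 * counting successful completions. The helper name is hypothetical.
 */
static inline int ib_example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	int ret, ok = 0;

	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0)
		if (wc.status == IB_WC_SUCCESS)
			ok++;
	return ret < 0 ? ret : ok;
}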
/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
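/*
 * Illustrative sketch, not part of the original header, of the re-arm/
 * re-poll protocol described above: arm the CQ, then poll once more to
 * close the race window before sleeping. The helper name is hypothetical.
 */
static inline int ib_example_arm_cq(struct ib_cq *cq, struct ib_wc *wc)
{
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
		return ib_poll_cq(cq, 1, wc); /* completions may have been missed */
	return 0;
}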
/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}
/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dev->dma_device, dma_addr);
}
/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}
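/*
 * Illustrative sketch, not part of the original header: the usual
 * map / check / use / unmap cycle for a kernel buffer. The helper name is
 * hypothetical; 0 is used as the failure value only in this sketch.
 */
static inline u64 ib_example_map_buf(struct ib_device *dev, void *buf,
				     size_t len)
{
	u64 addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (ib_dma_mapping_error(dev, addr))
		return 0;
	return addr;
}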
static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
					     u64 addr, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	return dma_unmap_single_attrs(dev->dma_device, addr, size,
				      direction, attrs);
}
/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}
/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}
/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_address(dev, sg);
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_len(dev, sg);
	return sg_dma_len(sg);
}
/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
2528 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
2529 * @dev: The device for which the DMA address is requested
2530 * @size: The size of the region to allocate in bytes
2531 * @dma_handle: A pointer for returning the DMA address of the region
2532 * @flag: memory allocator flags
2534 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
2540 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
2545 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
2546 *dma_handle = handle;
/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
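
/*
 * Example (illustrative sketch): allocating a coherent buffer that both
 * the CPU and the HCA access, assuming "device" is a valid struct
 * ib_device pointer in the caller:
 *
 *	u64 dma_handle;
 *	void *queue_buf;
 *
 *	queue_buf = ib_dma_alloc_coherent(device, PAGE_SIZE,
 *					  &dma_handle, GFP_KERNEL);
 *	if (!queue_buf)
 *		return -ENOMEM;
 *	(... hand dma_handle to the device, use queue_buf from the CPU ...)
 *	ib_dma_free_coherent(device, PAGE_SIZE, queue_buf, dma_handle);
 */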
/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);
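
/*
 * Example (illustrative sketch): registering a single DMA-mapped page,
 * assuming "pd" is a valid PD and "page_dma_addr" was obtained from one
 * of the ib_dma_*() mapping helpers:
 *
 *	struct ib_phys_buf pbuf = {
 *		.addr = page_dma_addr,
 *		.size = PAGE_SIZE,
 *	};
 *	u64 iova = page_dma_addr;
 *	struct ib_mr *mr;
 *
 *	mr = ib_reg_phys_mr(pd, &pbuf, 1,
 *			    IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *			    &iova);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */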
/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs a deregister of the memory region
 *   followed by a register of the physical memory region.  Where possible,
 *   resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);
/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);
/**
 * ib_create_mr - Allocates a memory region that may be used for
 *   signature handover operations.
 * @pd: The protection domain associated with the region.
 * @mr_init_attr: memory region init attributes.
 */
struct ib_mr *ib_create_mr(struct ib_pd *pd,
			   struct ib_mr_init_attr *mr_init_attr);
/**
 * ib_destroy_mr - Destroys a memory region that was created using
 *   ib_create_mr and removes it from HW translation tables.
 * @mr: The memory region to destroy.
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_destroy_mr(struct ib_mr *mr);
/**
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr().  Once ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * corresponding IB_WC_FAST_REG_MR work completion is reaped.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
		struct ib_device *device, int page_list_len);
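
/*
 * Example (illustrative sketch): the fast-registration flow described
 * above, registering one DMA-mapped page.  "pd", "qp", "device" and
 * "page_dma_addr" are assumed to exist in the caller; error checks for
 * the allocations are omitted for brevity:
 *
 *	struct ib_mr *mr = ib_alloc_fast_reg_mr(pd, 1);
 *	struct ib_fast_reg_page_list *pl =
 *		ib_alloc_fast_reg_page_list(device, 1);
 *	struct ib_send_wr wr, *bad_wr;
 *	int ret;
 *
 *	pl->page_list[0] = page_dma_addr;
 *	memset(&wr, 0, sizeof(wr));
 *	wr.opcode = IB_WR_FAST_REG_MR;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *	wr.wr.fast_reg.iova_start = page_dma_addr;
 *	wr.wr.fast_reg.page_list = pl;
 *	wr.wr.fast_reg.page_list_len = 1;
 *	wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 *	wr.wr.fast_reg.length = PAGE_SIZE;
 *	wr.wr.fast_reg.rkey = mr->rkey;
 *	wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_READ;
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */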
/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}
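
/*
 * Example (illustrative sketch): ULPs typically bump the key byte each
 * time a fast-reg MR is reused, so that stale remote references to the
 * old key are fenced off:
 *
 *	u8 key = mr->rkey & 0x000000ff;
 *
 *	ib_update_fast_reg_key(mr, ++key);
 */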
/**
 * ib_inc_rkey - increments the key portion of the given rkey.  Can be used
 *   for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}
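
/*
 * For example, ib_inc_rkey(0x012345ff) returns 0x01234500: only the low
 * key byte is incremented (with wrap-around), while the upper 24 index
 * bits are preserved.
 */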
/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 * @type: The type of the memory window (1 or 2).
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 *
 * If there is no immediate error, the function will update the rkey member
 * of the mw parameter to its new value.  The bind operation can still fail
 * asynchronously.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}
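
/*
 * Example (illustrative sketch): binding a previously allocated memory
 * window "mw" over part of a registered MR "mr", assuming "qp" is a
 * send-capable QP and "addr"/"length" fall inside the MR:
 *
 *	struct ib_mw_bind mw_bind = {
 *		.wr_id = 1,			(caller-chosen cookie)
 *		.send_flags = IB_SEND_SIGNALED,
 *		.bind_info = {
 *			.mr = mr,
 *			.addr = addr,
 *			.length = length,
 *			.mw_access_flags = IB_ACCESS_REMOTE_READ,
 *		},
 *	};
 *	int ret;
 *
 *	ret = ib_bind_mw(qp, mw, &mw_bind);
 */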
/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);
/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);
/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}
/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);
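
/*
 * Example (illustrative sketch): the FMR lifecycle, assuming "pd" and a
 * DMA-mapped page address "page_dma_addr" exist in the caller and error
 * checks are omitted for brevity:
 *
 *	struct ib_fmr_attr fmr_attr = {
 *		.max_pages = 1,
 *		.max_maps = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	u64 pages[1] = { page_dma_addr };
 *	LIST_HEAD(fmr_list);
 *	struct ib_fmr *fmr;
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &fmr_attr);
 *	ib_map_phys_fmr(fmr, pages, 1, page_dma_addr);
 *	(... use fmr->lkey / fmr->rkey in work requests ...)
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */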
/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
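
/*
 * Example (illustrative sketch): joining and leaving a multicast group
 * on a UD QP, assuming "mgid" and "mlid" were obtained from a subnet
 * administration join (e.g. via ib_sa_join_multicast()):
 *
 *	int ret;
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	(... send and receive multicast datagrams ...)
 *	ret = ib_detach_mcast(qp, &mgid, mlid);
 */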
/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

struct ib_dct *ib_create_dct(struct ib_pd *pd, struct ib_dct_init_attr *attr,
			     struct ib_udata *udata);
int ib_destroy_dct(struct ib_dct *dct);
int ib_query_dct(struct ib_dct *dct, struct ib_dct_attr *attr);

int ib_query_values(struct ib_device *device,
		    int q_values, struct ib_device_values *values);
static inline void ib_active_speed_enum_to_rate(u8 active_speed,
						int *rate,
						char **speed)
{
	switch (active_speed) {
	case IB_SPEED_DDR:
		*speed = " DDR";
		*rate = 50;
		break;
	case IB_SPEED_QDR:
		*speed = " QDR";
		*rate = 100;
		break;
	case IB_SPEED_FDR10:
		*speed = " FDR10";
		*rate = 100;
		break;
	case IB_SPEED_FDR:
		*speed = " FDR";
		*rate = 140;
		break;
	case IB_SPEED_EDR:
		*speed = " EDR";
		*rate = 250;
		break;
	case IB_SPEED_SDR:
	default:		/* default to SDR for invalid rates */
		*rate = 25;
		break;
	}
}
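
/*
 * Example (illustrative sketch): formatting a port's data rate from its
 * queried attributes, assuming "attr" holds the result of a successful
 * ib_query_port() call.  The per-lane rate is reported in units of
 * 0.1 Gb/s (e.g. SDR yields 25, i.e. 2.5 Gb/s per lane):
 *
 *	int rate;
 *	char *speed = "";
 *
 *	ib_active_speed_enum_to_rate(attr.active_speed, &rate, &speed);
 *	rate *= ib_width_enum_to_int(attr.active_width);
 *	pr_info("rate: %d.%d Gb/s (%s)\n", rate / 10, rate % 10, speed);
 */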
static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	return 0;
}
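
/*
 * Example: validating access flags before registering an MR.  The IBTA
 * spec requires local write permission whenever remote write or remote
 * atomic access is requested, since a remote peer writing into the MR
 * is a local write from the HCA's point of view:
 *
 *	ret = ib_check_mr_access(IB_ACCESS_REMOTE_WRITE |
 *				 IB_ACCESS_LOCAL_WRITE);	(returns 0)
 *	ret = ib_check_mr_access(IB_ACCESS_REMOTE_WRITE);
 *							(returns -EINVAL)
 */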
/**
 * ib_check_mr_status - lightweight check of MR status.
 *   This routine may provide status checks on a selected
 *   ib_mr.  Its first use is for the signature status check.
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *   ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *   Failed checks will be indicated in the status bitmask
 *   and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);
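
/*
 * Example (illustrative sketch): checking signature status after I/O on
 * a signature-enabled MR created with ib_create_mr(), assuming "sig_mr"
 * exists in the caller:
 *
 *	struct ib_mr_status mr_status;
 *	int ret;
 *
 *	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *	if (ret)
 *		return ret;
 *	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
 *		pr_err("signature error type %d at offset %llu\n",
 *		       mr_status.sig_err.err_type,
 *		       mr_status.sig_err.sig_err_offset);
 */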
#endif /* IB_VERBS_H */