/*
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX4_IB_H
#define MLX4_IB_H

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/notifier.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
#include <linux/rbtree.h>

#define MLX4_IB_DRV_NAME	"mlx4_ib"
#define pr_fmt(fmt)	"<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__

#define mlx4_ib_warn(ibdev, format, arg...) \
	dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg)

#define mlx4_ib_info(ibdev, format, arg...) \
	dev_info((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg)
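
/*
 * Illustrative use of the wrappers above (a sketch only; the message text,
 * the mlx4_ib_dev pointer "dev" and the "port" variable are made up):
 *
 *	mlx4_ib_warn(&dev->ib_dev, "port %d: catastrophic error detected\n", port);
 *
 * dev_warn()/dev_info() prefix the message with the underlying device name,
 * while the pr_fmt() definition tags plain pr_*() messages with
 * "<mlx4_ib> <function>: ".
 */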
enum {
	MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
	MLX4_IB_MAX_HEADROOM	 = 2048
};

#define MLX4_IB_SQ_HEADROOM(shift)	((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
#define MLX4_IB_SQ_MAX_SPARE		(MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
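
/*
 * Worked example: with the minimum WQE shift of 6 (64-byte WQEs),
 * MLX4_IB_SQ_HEADROOM(6) = (2048 >> 6) + 1 = 33 spare send WQEs, which is
 * exactly MLX4_IB_SQ_MAX_SPARE; larger WQEs need proportionally fewer spare
 * entries to cover the same 2 KB of headroom.
 */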
/* module parameter indicating whether the SM assigns the alias_GUID */
extern int mlx4_ib_sm_guid_assign;

extern struct proc_dir_entry *mlx4_mrs_dir_entry;

#define MLX4_IB_UC_STEER_QPN_ALIGN	1
#define MLX4_IB_UC_MAX_NUM_QPS		(256 * 1024)

#define MLX4_IB_MMAP_CMD_MASK	0xFF
#define MLX4_IB_MMAP_CMD_BITS	8
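
/*
 * The low MLX4_IB_MMAP_CMD_BITS bits of an mmap page offset select a command
 * from enum mlx4_ib_mmap_cmd below, leaving the upper bits for a
 * command-specific parameter. A minimal decoding sketch (assumed convention,
 * shown for illustration only):
 *
 *	unsigned long cmd   = vma->vm_pgoff & MLX4_IB_MMAP_CMD_MASK;
 *	unsigned long param = vma->vm_pgoff >> MLX4_IB_MMAP_CMD_BITS;
 */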
struct mlx4_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;
	struct mutex		db_page_mutex;

	struct ib_xrcd		ibxrcd;

struct mlx4_ib_cq_buf {

struct mlx4_ib_cq_resize {
	struct mlx4_ib_cq_buf	buf;

struct mlx4_shared_mr_info {
	struct ib_umem	       *umem;

	struct mlx4_ib_cq_buf	buf;
	struct mlx4_ib_cq_resize *resize_buf;
	struct mutex		resize_mutex;
	struct ib_umem	       *umem;
	struct ib_umem	       *resize_umem;

	struct ib_umem	       *umem;
	struct mlx4_shared_mr_info *smr_info;

struct mlx4_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list	ibfrpl;
	__be64		       *mapped_page_list;

	struct mlx4_fmr		mfmr;
enum mlx4_ib_qp_flags {
	MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
	MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
	MLX4_IB_SRIOV_SQP = 1 << 31,
};

struct mlx4_ib_gid_entry {
	struct list_head	list;

enum mlx4_ib_mmap_cmd {
	MLX4_IB_MMAP_UAR_PAGE		  = 0,
	MLX4_IB_MMAP_BLUE_FLAME_PAGE	  = 1,
	MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES = 2,
};
enum mlx4_ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	MLX4_IB_QPT_SMI = IB_QPT_SMI,
	MLX4_IB_QPT_GSI = IB_QPT_GSI,

	MLX4_IB_QPT_RC = IB_QPT_RC,
	MLX4_IB_QPT_UC = IB_QPT_UC,
	MLX4_IB_QPT_UD = IB_QPT_UD,
	MLX4_IB_QPT_RAW_IPV6 = IB_QPT_RAW_IPV6,
	MLX4_IB_QPT_RAW_ETHERTYPE = IB_QPT_RAW_ETHERTYPE,
	MLX4_IB_QPT_RAW_PACKET = IB_QPT_RAW_PACKET,
	MLX4_IB_QPT_XRC_INI = IB_QPT_XRC_INI,
	MLX4_IB_QPT_XRC_TGT = IB_QPT_XRC_TGT,

	MLX4_IB_QPT_PROXY_SMI_OWNER	= 1 << 16,
	MLX4_IB_QPT_PROXY_SMI		= 1 << 17,
	MLX4_IB_QPT_PROXY_GSI		= 1 << 18,
	MLX4_IB_QPT_TUN_SMI_OWNER	= 1 << 19,
	MLX4_IB_QPT_TUN_SMI		= 1 << 20,
	MLX4_IB_QPT_TUN_GSI		= 1 << 21,
};

#define MLX4_IB_QPT_ANY_SRIOV	(MLX4_IB_QPT_PROXY_SMI_OWNER | \
	MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER | \
	MLX4_IB_QPT_TUN_SMI | MLX4_IB_QPT_TUN_GSI)
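
/*
 * The proxy/tunnel QP types occupy single bits above the native IB QP types,
 * so SR-IOV special casing can be gated with one mask test, e.g. (sketch;
 * handle_sriov_mad() is a hypothetical helper used only for illustration):
 *
 *	if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
 *		return handle_sriov_mad(qp);
 */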
enum mlx4_ib_mad_ifc_flags {
	MLX4_MAD_IFC_IGNORE_MKEY	= 1,
	MLX4_MAD_IFC_IGNORE_BKEY	= 2,
	MLX4_MAD_IFC_IGNORE_KEYS	= (MLX4_MAD_IFC_IGNORE_MKEY |
					   MLX4_MAD_IFC_IGNORE_BKEY),
	MLX4_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX4_NUM_TUNNEL_BUFS		= 256,
};
struct mlx4_ib_tunnel_header {

struct mlx4_rcv_tunnel_hdr {
	__be32 flags_src_qp;	/* flags[6:5] is defined for VLANs:
				 * 0x0  - no VLAN was in the packet
				 * 0x01 - C-VLAN was in the packet */
	u8 g_ml_path;		/* gid bit stands for ipv6/4 header in RoCE */
	__be16 slid_mac_47_32;

struct mlx4_ib_proxy_sqp_hdr {
	struct mlx4_rcv_tunnel_hdr tun;
struct mlx4_roce_smac_vlan_info {
	int candidate_smac_index;
	int candidate_smac_port;
	int candidate_vlan_index;
	int candidate_vlan_port;

struct mlx4_ib_qpg_data {
	unsigned long *tss_bitmap;
	unsigned long *rss_bitmap;
	struct mlx4_ib_qp *qpg_parent;

	struct mlx4_ib_wq	rq;
	__be32			sq_signal_bits;
	unsigned		sq_next_wqe;
	int			sq_max_wqes_per_wr;
	struct mlx4_ib_wq	sq;

	enum mlx4_ib_qp_type	mlx4_ib_qp_type;
	struct ib_umem	       *umem;
	enum ib_qpg_type	qpg_type;
	struct mlx4_ib_qpg_data *qpg_data;
	struct list_head	gid_list;
	struct list_head	steering_rules;
	struct mlx4_ib_buf     *sqp_proxy_rcv;
	struct mlx4_roce_smac_vlan_info pri;
	struct mlx4_roce_smac_vlan_info alt;
	struct list_head	rules_list;

	struct mlx4_srq		msrq;
	struct ib_umem	       *umem;

	union mlx4_ext_av	av;
/****************************************/
/* alias guid support */
/****************************************/
#define NUM_PORT_ALIAS_GUID		2
#define NUM_ALIAS_GUID_IN_REC		8
#define NUM_ALIAS_GUID_REC_IN_PORT	16
#define GUID_REC_SIZE			8
#define NUM_ALIAS_GUID_PER_PORT		128
#define MLX4_NOT_SET_GUID		(0x00LL)
#define MLX4_GUID_FOR_DELETE_VAL	(~(0x00LL))
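
/*
 * Consistency of the constants above: each GUIDInfo record carries
 * NUM_ALIAS_GUID_IN_REC (8) GUIDs of GUID_REC_SIZE (8) bytes, and a port
 * exposes NUM_ALIAS_GUID_REC_IN_PORT (16) such records, giving
 * 16 * 8 = 128 = NUM_ALIAS_GUID_PER_PORT alias GUIDs per port.
 */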
enum mlx4_guid_alias_rec_status {
	MLX4_GUID_INFO_STATUS_IDLE,
	MLX4_GUID_INFO_STATUS_SET,
	MLX4_GUID_INFO_STATUS_PENDING,
};

enum mlx4_guid_alias_rec_ownership {
	MLX4_GUID_DRIVER_ASSIGN,
	MLX4_GUID_SYSADMIN_ASSIGN,
	MLX4_GUID_NONE_ASSIGN,		/* initial state of each record */
};

enum mlx4_guid_alias_rec_method {
	MLX4_GUID_INFO_RECORD_SET	= IB_MGMT_METHOD_SET,
	MLX4_GUID_INFO_RECORD_DELETE	= IB_SA_METHOD_DELETE,
};
struct mlx4_sriov_alias_guid_info_rec_det {
	u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
	ib_sa_comp_mask guid_indexes;		/* indicates which of the 8 records are valid */
	enum mlx4_guid_alias_rec_status status;	/* administrative status of the record */
	u8 method;				/* set or delete */
	enum mlx4_guid_alias_rec_ownership ownership; /* indicates who assigned this alias_guid record */
};
struct mlx4_sriov_alias_guid_port_rec_det {
	struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
	struct workqueue_struct *wq;
	struct delayed_work alias_guid_work;
	struct mlx4_sriov_alias_guid *parent;
	struct list_head cb_list;

struct mlx4_sriov_alias_guid {
	struct mlx4_sriov_alias_guid_port_rec_det ports_guid[MLX4_MAX_PORTS];
	spinlock_t ag_work_lock;
	struct ib_sa_client *sa_client;
};

struct mlx4_ib_demux_work {
	struct work_struct	work;
	struct mlx4_ib_dev     *dev;
struct mlx4_ib_tun_tx_buf {
	struct mlx4_ib_buf buf;

struct mlx4_ib_demux_pv_qp {
	enum ib_qp_type proxy_qpt;
	struct mlx4_ib_buf *ring;
	struct mlx4_ib_tun_tx_buf *tx_ring;

enum mlx4_ib_demux_pv_state {
	DEMUX_PV_STATE_STARTING,
	DEMUX_PV_STATE_ACTIVE,
	DEMUX_PV_STATE_DOWNING,

struct mlx4_ib_demux_pv_ctx {
	enum mlx4_ib_demux_pv_state state;
	struct ib_device *ib_dev;
	struct work_struct work;
	struct workqueue_struct *wq;
	struct mlx4_ib_demux_pv_qp qp[2];

struct mlx4_ib_demux_ctx {
	struct ib_device *ib_dev;
	struct workqueue_struct *wq;
	struct workqueue_struct *ud_wq;
	__be64 subnet_prefix;
	__be64 guid_cache[128];
	struct mlx4_ib_dev *dev;
	/* the following lock protects both mcg_table and mcg_mgid0_list */
	struct mutex		mcg_table_lock;
	struct rb_root		mcg_table;
	struct list_head	mcg_mgid0_list;
	struct workqueue_struct	*mcg_wq;
	struct mlx4_ib_demux_pv_ctx **tun;
	int    flushing; /* flushing the work queue */
struct mlx4_ib_sriov {
	struct mlx4_ib_demux_ctx demux[MLX4_MAX_PORTS];
	struct mlx4_ib_demux_pv_ctx *sqps[MLX4_MAX_PORTS];
	/* Use the irq-safe spinlock variants here, since this lock may be
	 * taken from interrupt context. */
	spinlock_t going_down_lock;

	struct mlx4_sriov_alias_guid alias_guid;

	/* CM paravirtualization fields */
	struct list_head cm_list;
	spinlock_t id_map_lock;
	struct rb_root sl_id_map;
	struct idr pv_id_table;
struct mlx4_ib_iboe {
	struct net_device      *netdevs[MLX4_MAX_PORTS];
	struct notifier_block	nb;
	union ib_gid		gid_table[MLX4_MAX_PORTS][128];

	u8			virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	u16			phys_pkey_cache[MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	struct list_head	pkey_port_list[MLX4_MFUNC_MAX];
	struct kobject	       *device_parent[MLX4_MFUNC_MAX];

struct mlx4_ib_iov_sysfs_attr {
	struct kobject *kobj;
	struct device_attribute dentry;

struct mlx4_ib_iov_sysfs_attr_ar {
	struct mlx4_ib_iov_sysfs_attr dentries[3 * NUM_ALIAS_GUID_PER_PORT + 1];
};

struct mlx4_ib_iov_port {
	struct mlx4_ib_dev *dev;
	struct list_head list;
	struct mlx4_ib_iov_sysfs_attr_ar *dentr_ar;
	struct ib_port_attr attr;
	struct kobject *cur_port;
	struct kobject *admin_alias_parent;
	struct kobject *gids_parent;
	struct kobject *pkeys_parent;
	struct kobject *mcgs_parent;
	struct mlx4_ib_iov_sysfs_attr mcg_dentry;
	struct ib_device	ib_dev;
	struct mlx4_dev	       *dev;
	struct mlx4_uar		priv_uar;
	MLX4_DECLARE_DOORBELL_LOCK(uar_lock);
	struct ib_mad_agent    *send_agent[MLX4_MAX_PORTS][2];
	struct ib_ah	       *sm_ah[MLX4_MAX_PORTS];
	struct mlx4_ib_sriov	sriov;
	struct mutex		cap_mask_mutex;
	struct mlx4_ib_iboe	iboe;
	int			counters[MLX4_MAX_PORTS];
	struct kobject	       *iov_parent;
	struct kobject	       *ports_parent;
	struct kobject	       *dev_ports_parent[MLX4_MFUNC_MAX];
	struct mlx4_ib_iov_port	iov_ports[MLX4_MAX_PORTS];
	struct pkey_mgt		pkeys;
	unsigned long	       *ib_uc_qpns_bitmap;
struct ib_event_work {
	struct work_struct	work;
	struct mlx4_ib_dev     *ib_dev;
	struct mlx4_eqe		ib_eqe;
};

struct mlx4_ib_qp_tunnel_init_attr {
	struct ib_qp_init_attr init_attr;
	enum ib_qp_type proxy_qp_type;
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
}

static inline struct mlx4_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx4_ib_ucontext, ibucontext);
}

static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx4_ib_pd, ibpd);
}

static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd);
}

static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx4_ib_cq, ibcq);
}

static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)
{
	return container_of(mcq, struct mlx4_ib_cq, mcq);
}

static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx4_ib_mr, ibmr);
}

static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
	return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
}

static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
}

static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx4_ib_qp, ibqp);
}

static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_qp, mqp);
}

static inline struct mlx4_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx4_ib_srq, ibsrq);
}

static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
{
	return container_of(msrq, struct mlx4_ib_srq, msrq);
}

static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx4_ib_ah, ibah);
}
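
/*
 * These converters recover the driver-private structure from the uverbs
 * object embedded inside it. A typical sequence at the top of a verbs entry
 * point looks like this (sketch with hypothetical local names):
 *
 *	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
 *	struct mlx4_ib_qp  *qp  = to_mqp(ibqp);
 */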
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);

int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
			struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem);
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata, int mr_id);
int mlx4_ib_dereg_mr(struct ib_mr *mr);
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len);
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len);
void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
int mlx4_ib_ignore_overrun_cq(struct ib_cq *ibcq);
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx4_ib_destroy_cq(struct ib_cq *cq);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx4_ib_destroy_ah(struct ib_ah *ah);

struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);

struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad);
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);

struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
				 struct ib_fmr_attr *fmr_attr);
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
			 u64 iova);
int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view);
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view);
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view);

int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
			u8 *mac, int *is_mcast, u8 port);

int mlx4_ib_query_if_stat(struct mlx4_ib_dev *dev, u32 counter_index,
			  union mlx4_counter *counter, u8 clear);
static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
	u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;

	if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
		return 1;	/* RoCE addresses always carry a GRH */

	return !!(ah->av.ib.g_slid & 0x80);
}
int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx);
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq);
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave);
int mlx4_ib_mcg_init(void);
void mlx4_ib_mcg_destroy(void);

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid);

int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave,
				  struct ib_sa_mad *sa_mad);
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad);

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid);

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type);
void mlx4_ib_tunnels_update_work(struct work_struct *work);

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad);
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
			 u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad);
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);

int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad, int is_eth);
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad);

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev);
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave_id);
/* alias guid support */
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port);
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);

void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num,
					  u8 port_num, u8 *p_data);

void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
					 int block_num, u8 port_num,
					 u8 *p_data);

int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
			    struct attribute *attr);
void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
			     struct attribute *attr);
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device);

void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);

__be64 mlx4_ib_gen_node_guid(void);

int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,

#endif /* MLX4_IB_H */