/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>

#include <asm/byteorder.h>

#include <netinet/in.h>
#include <netinet/toecore.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"

#define DRV_NAME "iw_cxgbe"
#define MOD DRV_NAME ":"
#define KTR_IW_CXGBE	KTR_SPARE3

extern int c4iw_debug;
#define PDBG(fmt, args...) \
do { \
	if (c4iw_debug) \
		printf(MOD fmt, ## args); \
} while (0)

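/*
 * Example (hypothetical call site): PDBG("%s: qpid 0x%x\n", __func__, qpid)
 * prints to the console only when the c4iw_debug tunable is set, while the
 * CTR4()-style KTR_IW_CXGBE tracepoints used throughout this file log to
 * the ktr(4) trace buffer (when the kernel is built with KTR).
 */
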
static inline void *cplhdr(struct mbuf *m)
{
	return mtod(m, void *);
}

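/*
 * A CPL message from the hardware starts at the beginning of the mbuf data
 * area, so cplhdr() is just mtod(). A typical (hypothetical) consumer casts
 * the result to the expected CPL type:
 *
 *	const struct cpl_act_establish *cpl = cplhdr(m);
 */
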
#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.rq.start)

#define C4IW_ID_TABLE_F_RANDOM 1	/* Pseudo-randomize the id's returned */
#define C4IW_ID_TABLE_F_EMPTY  2	/* Table is initially empty */

struct c4iw_id_table {
	u32 flags;
	u32 start;		/* logical minimal id */
	u32 last;		/* hint for find */
	u32 max;
	spinlock_t lock;
	unsigned long *table;
};

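/*
 * The id table is a spinlock-protected bitmap allocator.
 * C4IW_ID_TABLE_F_RANDOM randomizes the search hint so returned ids
 * (e.g. STAGs) are harder for a remote peer to guess, and
 * C4IW_ID_TABLE_F_EMPTY creates the table with no free ids, for tables
 * (such as the QID table) that are populated later via c4iw_put_resource().
 */
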
struct c4iw_resource {
	struct c4iw_id_table tpt_table;
	struct c4iw_id_table qid_table;
	struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
	struct list_head entry;
	u32 qid;
};

struct c4iw_dev_ucontext {
	struct list_head qpids;
	struct list_head cqids;
	struct mutex lock;
};

enum c4iw_rdev_flags {
	T4_FATAL_ERROR = (1 << 0),
};

struct c4iw_stat {
	u64 total;
	u64 cur;
	u64 max;
	u64 fail;
};

struct c4iw_stats {
	struct mutex lock;
	struct c4iw_stat qid;
	struct c4iw_stat pd;
	struct c4iw_stat stag;
	struct c4iw_stat pbl;
	struct c4iw_stat rqt;
	u64 db_full;
	u64 db_empty;
	u64 db_drop;
	u64 db_state_transitions;
};

struct c4iw_rdev {
	struct adapter *adap;
	struct c4iw_resource resource;
	unsigned long qpshift;
	u32 qpmask;
	unsigned long cqshift;
	u32 cqmask;
	struct c4iw_dev_ucontext uctx;
	struct gen_pool *pbl_pool;
	struct gen_pool *rqt_pool;
	u32 flags;
	struct c4iw_stats stats;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
	return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
	return min((int)T4_MAX_NUM_STAG, (int)(rdev->adap->vres.stag.size >> 5));
}

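/*
 * Each TPT (translation protection table) entry occupies 32 bytes of
 * adapter STAG memory, so the number of usable STAGs is the region size
 * shifted right by 5, capped at the T4_MAX_NUM_STAG firmware limit.
 */
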
#define C4IW_WR_TO (10*HZ)

struct c4iw_wr_wait {
	int ret;
	atomic_t completion;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	atomic_set(&wr_waitp->completion, 0);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
	wr_waitp->ret = ret;
	atomic_set(&wr_waitp->completion, 1);
	wakeup(wr_waitp);
}

static inline int
c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp,
		u32 hwtid, u32 qpid, const char *func)
{
	struct adapter *sc = rdev->adap;
	unsigned to = C4IW_WR_TO;

	while (!atomic_read(&wr_waitp->completion)) {
		tsleep(wr_waitp, 0, "c4iw_wait", to);
		if (SIGPENDING(curthread)) {
			printf("%s - Device %s not responding - "
			    "tid %u qpid %u\n", func,
			    device_get_nameunit(sc->dev), hwtid, qpid);
			if (c4iw_fatal_error(rdev)) {
				wr_waitp->ret = -EIO;
				break;
			}
			to = to << 2;
		}
	}

	CTR4(KTR_IW_CXGBE, "%s: FW reply %d tid %u qpid %u",
	    device_get_nameunit(sc->dev), wr_waitp->ret, hwtid, qpid);
	return (wr_waitp->ret);
}

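/*
 * Typical pattern (sketch): the caller initializes the wait object, posts
 * a firmware work request whose reply handler ends with c4iw_wake_up(),
 * then blocks here until the reply CPL arrives or the device is declared
 * dead:
 *
 *	c4iw_init_wr_wait(&wr_wait);
 *	... send FW_RI_WR (or similar) to the chip ...
 *	ret = c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid, __func__);
 */
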
enum db_state {
	NORMAL = 0,
	FLOW_CONTROL = 1,
	RECOVERY = 2
};

struct c4iw_dev {
	struct ib_device ibdev;
	struct c4iw_rdev rdev;
	u32 device_cap_flags;
	struct idr cqidr;
	struct idr qpidr;
	struct idr mmidr;
	spinlock_t lock;
	struct dentry *debugfs_root;
	enum db_state db_state;
	int qpcnt;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
	return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
	return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
	return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
	return idr_find(&rhp->mmidr, mmid);
}

static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				 void *handle, u32 id, int lock)
{
	int ret;
	int newid;

	do {
		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
			return -ENOMEM;
		if (lock)
			spin_lock_irq(&rhp->lock);
		ret = idr_get_new_above(idr, handle, id, &newid);
		BUG_ON(!ret && newid != id);
		if (lock)
			spin_unlock_irq(&rhp->lock);
	} while (ret == -EAGAIN);

	return ret;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
				       void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
				  u32 id, int lock)
{
	if (lock)
		spin_lock_irq(&rhp->lock);
	idr_remove(idr, id);
	if (lock)
		spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
					struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 0);
}

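/*
 * QPs, CQs and MRs are stored in the idr tables above under their
 * hardware-visible IDs, so event and interrupt paths can map a QPID or
 * CQID taken from a CQE/CPL straight to the owning software object via
 * get_qhp()/get_chp()/get_mhp(). The _nolock variants are for callers
 * that already hold rhp->lock.
 */
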
struct c4iw_pd {
	struct ib_pd ibpd;
	u32 pdid;
	struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
	u64 len;
	u64 va_fbo;
	enum fw_ri_mem_perms perms;
	u32 stag;
	u32 pdid;
	u32 qpid;
	u32 pbl_addr;
	u32 pbl_size;
	u32 state:1;
	u32 type:2;
	u32 rsvd:1;
	u32 remote_invaliate_disable:1;
	u32 zbva:1;
	u32 mw_bind_enable:1;
	u32 page_size:5;
};

struct c4iw_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
	struct ib_mw ibmw;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_fr_page_list {
	struct ib_fast_reg_page_list ibpl;
	DECLARE_PCI_UNMAP_ADDR(mapping);
	dma_addr_t dma_addr;
	struct c4iw_dev *dev;
	int size;
};

static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
					struct ib_fast_reg_page_list *ibpl)
{
	return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
}

struct c4iw_cq {
	struct ib_cq ibcq;
	struct c4iw_dev *rhp;
	struct t4_cq cq;
	spinlock_t lock;
	spinlock_t comp_handler_lock;
	atomic_t refcnt;
	wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
	u8 initiator;
	u8 recv_marker_enabled;
	u8 xmit_marker_enabled;
	u8 crc_enabled;
	u8 enhanced_rdma_conn;
	u8 version;
	u8 p2p_type;
};

struct c4iw_qp_attributes {
	u32 scq;
	u32 rcq;
	u32 sq_num_entries;
	u32 rq_num_entries;
	u32 sq_max_sges;
	u32 sq_max_sges_rdma_write;
	u32 rq_max_sges;
	u32 state;
	u8 enable_rdma_read;
	u8 enable_rdma_write;
	u8 enable_bind;
	u8 enable_mmid0_fastreg;
	u32 max_ord;
	u32 max_ird;
	u32 pd;
	u32 next_state;
	char terminate_buffer[52];
	u32 terminate_msg_len;
	u8 is_terminate_local;
	struct c4iw_mpa_attributes mpa_attr;
	struct c4iw_ep *llp_stream_handle;
	u8 layer_etype;
	u8 ecode;
	u16 sq_db_inc;
	u16 rq_db_inc;
};

struct c4iw_qp {
	struct ib_qp ibqp;
	struct c4iw_dev *rhp;
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attr;
	struct t4_wq wq;
	spinlock_t lock;
	struct mutex mutex;
	atomic_t refcnt;
	wait_queue_head_t wait;
	struct timer_list timer;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
	struct ib_ucontext ibucontext;
	struct c4iw_dev_ucontext uctx;
	u32 key;
	spinlock_t mmap_lock;
	struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
	return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
	struct list_head entry;
	u64 addr;
	u32 key;
	unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
						u32 key, unsigned len)
{
	struct list_head *pos, *nxt;
	struct c4iw_mm_entry *mm;

	spin_lock(&ucontext->mmap_lock);
	list_for_each_safe(pos, nxt, &ucontext->mmaps) {

		mm = list_entry(pos, struct c4iw_mm_entry, entry);
		if (mm->key == key && mm->len == len) {
			list_del_init(&mm->entry);
			spin_unlock(&ucontext->mmap_lock);
			CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d",
			    __func__, key, (unsigned long long) mm->addr,
			    mm->len);
			return mm;
		}
	}
	spin_unlock(&ucontext->mmap_lock);

	return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
			       struct c4iw_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d", __func__, mm->key,
	    (unsigned long long) mm->addr, mm->len);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}

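/*
 * The mmap list implements the usual iWARP userspace mapping handshake:
 * create_qp/create_cq stash an (addr, key, len) triple here and return the
 * key to the user library, which passes it back as the mmap(2) offset; the
 * driver's mmap handler then calls remove_mmap() to match the request to a
 * doorbell or queue address. Each cookie is single-use.
 */
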
enum c4iw_qp_attr_mask {
	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
	C4IW_QP_ATTR_SQ_DB = 1 << 1,
	C4IW_QP_ATTR_RQ_DB = 1 << 2,
	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	C4IW_QP_ATTR_MAX_ORD = 1 << 11,
	C4IW_QP_ATTR_MAX_IRD = 1 << 12,
	C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
	C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
				     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
				     C4IW_QP_ATTR_MAX_ORD |
				     C4IW_QP_ATTR_MAX_IRD |
				     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
				     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
				     C4IW_QP_ATTR_MPA_ATTR |
				     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

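/*
 * Typical use (sketch; rhp/qhp stand for a hypothetical caller's device
 * and QP): drive a QP to ERROR from an async error handler with
 *
 *	struct c4iw_qp_attributes attrs;
 *
 *	attrs.next_state = C4IW_QP_STATE_ERROR;
 *	c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 *
 * The final argument marks the transition as driver-internal rather than
 * one requested through the verbs API.
 */
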
int c4iw_modify_qp(struct c4iw_dev *rhp,
				struct c4iw_qp *qhp,
				enum c4iw_qp_attr_mask mask,
				struct c4iw_qp_attributes *attrs,
				int internal);

enum c4iw_qp_state {
	C4IW_QP_STATE_IDLE,
	C4IW_QP_STATE_RTS,
	C4IW_QP_STATE_ERROR,
	C4IW_QP_STATE_TERMINATE,
	C4IW_QP_STATE_CLOSING,
	C4IW_QP_STATE_TOT
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
	case IB_QPS_INIT:
		return C4IW_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return C4IW_QP_STATE_RTS;
	case IB_QPS_SQD:
		return C4IW_QP_STATE_CLOSING;
	case IB_QPS_SQE:
		return C4IW_QP_STATE_TERMINATE;
	case IB_QPS_ERR:
		return C4IW_QP_STATE_ERROR;
	default:
		return -1;
	}
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
	switch (c4iw_qp_state) {
	case C4IW_QP_STATE_IDLE:
		return IB_QPS_INIT;
	case C4IW_QP_STATE_RTS:
		return IB_QPS_RTS;
	case C4IW_QP_STATE_CLOSING:
		return IB_QPS_SQD;
	case C4IW_QP_STATE_TERMINATE:
		return IB_QPS_SQE;
	case C4IW_QP_STATE_ERROR:
		return IB_QPS_ERR;
	}
	return IB_QPS_ERR;
}

static inline u32 c4iw_ib_to_tpt_access(int a)
{
	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
	       (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
	       FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}

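/*
 * These translate ib_verbs access flags into the firmware's TPT permission
 * bits. Note that c4iw_ib_to_tpt_access() always grants local read,
 * matching verbs semantics where every MR is at least locally readable,
 * while MW bind permissions carry only the remote read/write bits.
 */
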
enum c4iw_mmid_state {
	C4IW_STAG_STATE_VALID,
	C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "iw_cxgbe Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA	256
#define MPA_ENHANCED_RDMA_CONN	0x10
#define MPA_REJECT		0x20
#define MPA_CRC			0x40
#define MPA_MARKERS		0x80
#define MPA_FLAGS_MASK		0xE0

#define MPA_V2_PEER2PEER_MODEL		0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR	0x4000
#define MPA_V2_RDMA_WRITE_RTR		0x8000
#define MPA_V2_RDMA_READ_RTR		0x4000
#define MPA_V2_IRD_ORD_MASK		0x3FFF

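/*
 * In an MPA v2 header the 16-bit ird/ord fields are overloaded: the low
 * 14 bits carry the actual IRD/ORD value and the top bits flag the
 * peer-to-peer RTR types. A sketch (mpa_v2_params and ord are a
 * hypothetical caller's variables) of building the ord field for a
 * write-RTR peer:
 *
 *	mpa_v2_params.ord = htons(MPA_V2_RDMA_WRITE_RTR |
 *	    (ord & MPA_V2_IRD_ORD_MASK));
 */
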
/* Fixme: use atomic_read for kref.count, as Linux does */
#define c4iw_put_ep(ep) { \
	CTR4(KTR_IW_CXGBE, "put_ep (%s:%u) ep %p, refcnt %d", \
	     __func__, __LINE__, ep, (ep)->kref.count); \
	WARN_ON((ep)->kref.count < 1); \
	kref_put(&((ep)->kref), _c4iw_free_ep); \
}

/* Fixme: use atomic_read for kref.count, as Linux does */
#define c4iw_get_ep(ep) { \
	CTR4(KTR_IW_CXGBE, "get_ep (%s:%u) ep %p, refcnt %d", \
	     __func__, __LINE__, ep, (ep)->kref.count); \
	kref_get(&((ep)->kref)); \
}

void _c4iw_free_ep(struct kref *kref);

struct mpa_message {
	u8 key[16];
	u8 flags;
	u8 revision;
	__be16 private_data_size;
	u8 private_data[0];
};

struct mpa_v2_conn_params {
	__be16 ird;
	__be16 ord;
};

struct terminate_message {
	u8 layer_etype;
	u8 ecode;
	__be16 hdrct_rsvd;
	u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
	LAYER_RDMAP = 0x00,
	LAYER_DDP = 0x10,
	LAYER_MPA = 0x20,
	RDMAP_LOCAL_CATA = 0x00,
	RDMAP_REMOTE_PROT = 0x01,
	RDMAP_REMOTE_OP = 0x02,
	DDP_LOCAL_CATA = 0x00,
	DDP_TAGGED_ERR = 0x01,
	DDP_UNTAGGED_ERR = 0x02,
	DDP_LLP = 0x03
};

enum c4iw_rdma_ecodes {
	RDMAP_INV_STAG = 0x00,
	RDMAP_BASE_BOUNDS = 0x01,
	RDMAP_ACC_VIOL = 0x02,
	RDMAP_STAG_NOT_ASSOC = 0x03,
	RDMAP_TO_WRAP = 0x04,
	RDMAP_INV_VERS = 0x05,
	RDMAP_INV_OPCODE = 0x06,
	RDMAP_STREAM_CATA = 0x07,
	RDMAP_GLOBAL_CATA = 0x08,
	RDMAP_CANT_INV_STAG = 0x09,
	RDMAP_UNSPECIFIED = 0xff
};

enum c4iw_ddp_ecodes {
	DDPT_INV_STAG = 0x00,
	DDPT_BASE_BOUNDS = 0x01,
	DDPT_STAG_NOT_ASSOC = 0x02,
	DDPT_TO_WRAP = 0x03,
	DDPT_INV_VERS = 0x04,
	DDPU_INV_QN = 0x01,
	DDPU_INV_MSN_NOBUF = 0x02,
	DDPU_INV_MSN_RANGE = 0x03,
	DDPU_INV_MO = 0x04,
	DDPU_MSG_TOOBIG = 0x05,
	DDPU_INV_VERS = 0x06
};

enum c4iw_mpa_ecodes {
	MPA_CRC_ERR = 0x02,
	MPA_MARKER_ERR = 0x03,
	MPA_LOCAL_CATA = 0x05,
	MPA_INSUFF_IRD = 0x06,
	MPA_NOMATCH_RTR = 0x07,
};

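/*
 * A TERMINATE message identifies its fault with a single layer_etype byte
 * (a LAYER_* value in the high nibble, the error type in the low nibble)
 * plus one of the ecode values above; for example, a failed MPA v2
 * negotiation is reported as (LAYER_MPA | DDP_LLP) with MPA_INSUFF_IRD or
 * MPA_NOMATCH_RTR.
 */
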
enum c4iw_ep_state {
	IDLE = 0,
	LISTEN,
	CONNECTING,
	MPA_REQ_WAIT,
	MPA_REQ_SENT,
	MPA_REQ_RCVD,
	MPA_REP_SENT,
	FPDU_MODE,
	ABORTING,
	CLOSING,
	MORIBUND,
	DEAD,
};

enum c4iw_ep_flags {
	PEER_ABORT_IN_PROGRESS = 0,
	ABORT_REQ_IN_PROGRESS = 1,
	RELEASE_RESOURCES = 2,
	CLOSE_SENT = 3,
	TIMEOUT = 4,
	QP_REFERENCED = 5,
};

enum c4iw_ep_history {
	ACT_OPEN_REQ = 0,
	ACT_OFLD_CONN = 1,
	ACT_OPEN_RPL = 2,
	ACT_ESTAB = 3,
	PASS_ACCEPT_REQ = 4,
	PASS_ESTAB = 5,
	ABORT_UPCALL = 6,
	ESTAB_UPCALL = 7,
	CLOSE_UPCALL = 8,
	ULP_ACCEPT = 9,
	ULP_REJECT = 10,
	TIMEDOUT = 11,
	PEER_ABORT = 12,
	PEER_CLOSE = 13,
	CONNREQ_UPCALL = 14,
	ABORT_CONN = 15,
	DISCONN_UPCALL = 16,
	EP_DISC_CLOSE = 17,
	EP_DISC_ABORT = 18,
	CONN_RPL_UPCALL = 19,
	ACT_RETRY_NOMEM = 20,
	ACT_RETRY_INUSE = 21
};

struct c4iw_ep_common {
	TAILQ_ENTRY(c4iw_ep_common) entry;	/* Work queue attachment */
	struct iw_cm_id *cm_id;
	struct c4iw_qp *qp;
	struct c4iw_dev *dev;
	enum c4iw_ep_state state;
	struct kref kref;
	struct mutex mutex;
	struct sockaddr_in local_addr;
	struct sockaddr_in remote_addr;
	struct c4iw_wr_wait wr_wait;
	unsigned long flags;
	unsigned long history;
	int rpl_err;
	int rpl_done;
	struct thread *thread;
	struct socket *so;
};

struct c4iw_listen_ep {
	struct c4iw_ep_common com;
	unsigned int stid;
	int backlog;
};

struct c4iw_ep {
	struct c4iw_ep_common com;
	struct c4iw_ep *parent_ep;
	struct timer_list timer;
	struct list_head entry;
	unsigned int atid;
	u32 hwtid;
	u32 snd_seq;
	u32 rcv_seq;
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct c4iw_mpa_attributes mpa_attr;
	u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
	unsigned int mpa_pkt_len;
	u32 ird;
	u32 ord;
	u32 smac_idx;
	u32 tx_chan;
	u32 mtu;
	u16 mss;
	u16 emss;
	u16 plen;
	u16 rss_qid;
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 retry_with_mpa_v1;
	u8 tried_with_mpa_v1;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline int compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}

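/*
 * TCP's window field is 16 bits, so any receive window larger than 65535
 * bytes needs a window-scale shift (RFC 1323): a 64KB window yields
 * wscale 1, a 1MB window wscale 5, and the shift is capped at the
 * protocol maximum of 14.
 */
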
u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
			u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct mbuf *m);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void __exit c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
					struct ib_device *device,
					int page_list_len);
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata,
			       int mr_id);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf,
				     int acc,
				     u64 *iova_start);
int c4iw_reregister_phys_mem(struct ib_mr *mr,
			     int mr_rereg_mask,
			     struct ib_pd *pd,
			     struct ib_phys_buf *buffer_list,
			     int num_phys_buf,
			     int acc, u64 *iova_start);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct mbuf *m);
void c4iw_flush_hw_cq(struct t4_cq *cq);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_ev_handler(struct sge_iq *, const struct rsp_ctrl *);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;

#include <sys/blist.h>
struct gen_pool {
	blist_t gen_list;
	daddr_t gen_base;
	int gen_chunk_shift;
	struct mutex gen_lock;
};

static __inline struct gen_pool *
gen_pool_create(daddr_t base, u_int chunk_shift, u_int len)
{
	struct gen_pool *gp;

	gp = malloc(sizeof(struct gen_pool), M_DEVBUF, M_NOWAIT);
	if (gp == NULL)
		return (NULL);

	memset(gp, 0, sizeof(struct gen_pool));
	gp->gen_list = blist_create(len >> chunk_shift, M_NOWAIT);
	if (gp->gen_list == NULL) {
		free(gp, M_DEVBUF);
		return (NULL);
	}
	blist_free(gp->gen_list, 0, len >> chunk_shift);
	gp->gen_base = base;
	gp->gen_chunk_shift = chunk_shift;
	//mutex_init(&gp->gen_lock, "genpool", NULL, MTX_DUPOK|MTX_DEF);
	mutex_init(&gp->gen_lock);

	return (gp);
}

static __inline unsigned long
gen_pool_alloc(struct gen_pool *gp, int size)
{
	int chunks;
	daddr_t blkno;

	chunks = (size + (1 << gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift;
	mutex_lock(&gp->gen_lock);
	blkno = blist_alloc(gp->gen_list, chunks);
	mutex_unlock(&gp->gen_lock);

	if (blkno == SWAPBLK_NONE)
		return (0);

	return (gp->gen_base + ((1 << gp->gen_chunk_shift) * blkno));
}

static __inline void
gen_pool_free(struct gen_pool *gp, daddr_t address, int size)
{
	int chunks;
	daddr_t blkno;

	chunks = (size + (1 << gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift;
	blkno = (address - gp->gen_base) / (1 << gp->gen_chunk_shift);
	mutex_lock(&gp->gen_lock);
	blist_free(gp->gen_list, blkno, chunks);
	mutex_unlock(&gp->gen_lock);
}

static __inline void
gen_pool_destroy(struct gen_pool *gp)
{
	blist_destroy(gp->gen_list);
	free(gp, M_DEVBUF);
}

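/*
 * The routines above emulate just enough of the Linux gen_pool API on top
 * of FreeBSD's blist(9) allocator for the PBL and RQT pools: addresses are
 * handed out in (1 << gen_chunk_shift)-byte chunks relative to gen_base,
 * and gen_pool_alloc() returns 0 rather than a valid pool address when the
 * blist is exhausted. Hypothetical setup for a PBL region (MIN_PBL_SHIFT
 * names the chunk size here):
 *
 *	pool = gen_pool_create(adap->vres.pbl.start, MIN_PBL_SHIFT,
 *	    adap->vres.pbl.size);
 */
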
#if defined(__i386__) || defined(__amd64__)
#define L1_CACHE_BYTES 128
#else
#define L1_CACHE_BYTES 32
#endif

static inline int
idr_for_each(struct idr *idp,
	     int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}

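/*
 * This mirrors Linux's idr_for_each() pre-order walk, carried locally
 * because the compat idr layer here does not provide it: n counts the id
 * bits below the current layer, pa[] stacks the parent layers, and fn is
 * called for every occupied leaf until it returns non-zero.
 */
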
void c4iw_cm_init_cpl(struct adapter *);
void c4iw_cm_term_cpl(struct adapter *);

void your_reg_device(struct c4iw_dev *dev);

#define SGE_CTRLQ_NUM	0

extern int spg_creds;	/* Status Page size in credit units (1 unit = 64B) */

#endif /* __IW_CXGB4_H__ */