/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>

#include <asm/byteorder.h>

#include <netinet/in.h>
#include <netinet/toecore.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#define DRV_NAME "iw_cxgbe"
#define MOD DRV_NAME ":"
#define KTR_IW_CXGBE	KTR_SPARE3

extern int c4iw_debug;
#define PDBG(fmt, args...) \
do { \
	if (c4iw_debug) \
		printf(MOD fmt, ## args); \
} while (0)
static inline void *cplhdr(struct mbuf *m)
{
	return mtod(m, void *);
}
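
/*
 * Usage sketch (illustrative only, not compiled): with the c4iw_debug knob
 * set, PDBG() prints a line tagged with the module name, and cplhdr() gives
 * the CPL header at the start of an offload mbuf.
 *
 *	struct cpl_act_establish *req = cplhdr(m);
 *
 *	PDBG("%s: tid %u\n", __func__, GET_TID(req));
 */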
#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.rq.start)

#define C4IW_ID_TABLE_F_RANDOM 1	/* Pseudo-randomize the IDs returned */
#define C4IW_ID_TABLE_F_EMPTY  2	/* Table is initially empty */
struct c4iw_id_table {
	u32 flags;
	u32 start;		/* logical minimal id */
	u32 last;		/* hint for find */
	u32 max;
	spinlock_t lock;
	unsigned long *table;
};
struct c4iw_resource {
	struct c4iw_id_table tpt_table;
	struct c4iw_id_table qid_table;
	struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
	struct list_head entry;
	u32 qid;
};

struct c4iw_dev_ucontext {
	struct list_head qpids;
	struct list_head cqids;
};
enum c4iw_rdev_flags {
	T4_FATAL_ERROR = (1 << 0),
};

struct c4iw_stat {
	u64 total;
	u64 cur;
	u64 max;
	u64 fail;
};

struct c4iw_stats {
	struct mutex lock;
	struct c4iw_stat qid;
	struct c4iw_stat pd;
	struct c4iw_stat stag;
	struct c4iw_stat pbl;
	struct c4iw_stat rqt;
	u64 db_full;
	u64 db_empty;
	u64 db_drop;
	u64 db_state_transitions;
};
struct c4iw_rdev {
	struct adapter *adap;
	struct c4iw_resource resource;
	unsigned long qpshift;
	u32 qpmask;
	unsigned long cqshift;
	u32 cqmask;
	struct c4iw_dev_ucontext uctx;
	struct gen_pool *pbl_pool;
	struct gen_pool *rqt_pool;
	u32 flags;
	struct c4iw_stats stats;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
	return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
	return min((int)T4_MAX_NUM_STAG, (int)(rdev->adap->vres.stag.size >> 5));
}
#define C4IW_WR_TO (10 * HZ)

struct c4iw_wr_wait {
	int ret;
	atomic_t completion;
};
static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	atomic_set(&wr_waitp->completion, 0);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
	wr_waitp->ret = ret;
	atomic_set(&wr_waitp->completion, 1);
	wakeup(wr_waitp);
}
static inline int
c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp,
    u32 hwtid, u32 qpid, const char *func)
{
	struct adapter *sc = rdev->adap;
	unsigned to = C4IW_WR_TO;

	while (!atomic_read(&wr_waitp->completion)) {
		tsleep(wr_waitp, 0, "c4iw_wait", to);
		if (SIGPENDING(curthread)) {
			printf("%s - Device %s not responding - "
			    "tid %u qpid %u\n", func,
			    device_get_nameunit(sc->dev), hwtid, qpid);
			if (c4iw_fatal_error(rdev)) {
				wr_waitp->ret = -EIO;
				break;
			}
			to = to << 2;
		}
	}
	if (wr_waitp->ret)
		CTR4(KTR_IW_CXGBE, "%s: FW reply %d tid %u qpid %u",
		    device_get_nameunit(sc->dev), wr_waitp->ret, hwtid, qpid);
	return (wr_waitp->ret);
}
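
/*
 * Usage sketch (illustrative only): a caller pairs a wait object with a work
 * request that carries a cookie back to it; the CPL reply handler then calls
 * c4iw_wake_up() with the firmware's return code.  post_fw_wr() below is a
 * hypothetical helper, not part of this driver.
 *
 *	struct c4iw_wr_wait wr_wait;
 *	int ret;
 *
 *	c4iw_init_wr_wait(&wr_wait);
 *	ret = post_fw_wr(rdev, &wr_wait);	(hypothetical)
 *	if (ret == 0)
 *		ret = c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid,
 *		    __func__);
 */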
enum db_state {
	NORMAL = 0,
	FLOW_CONTROL = 1,
	RECOVERY = 2
};

struct c4iw_dev {
	struct ib_device ibdev;
	struct c4iw_rdev rdev;
	u32 device_cap_flags;
	struct idr cqidr;
	struct idr qpidr;
	struct idr mmidr;
	spinlock_t lock;
	struct dentry *debugfs_root;
	enum db_state db_state;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
	return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
	return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
	return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
	return idr_find(&rhp->mmidr, mmid);
}
static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				 void *handle, u32 id, int lock)
{
	int ret;
	int newid;

	do {
		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
			return -ENOMEM;
		if (lock)
			spin_lock_irq(&rhp->lock);
		ret = idr_get_new_above(idr, handle, id, &newid);
		BUG_ON(!ret && newid != id);
		if (lock)
			spin_unlock_irq(&rhp->lock);
	} while (ret == -EAGAIN);

	return ret;
}
static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
				       void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
				  u32 id, int lock)
{
	if (lock)
		spin_lock_irq(&rhp->lock);
	idr_remove(idr, id);
	if (lock)
		spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
					struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 0);
}
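
/*
 * Usage sketch (illustrative only): objects are published in the per-device
 * idrs keyed by their hardware ID, so event handlers can map an ID from a
 * CQE or CPL message back to the host structure.  For a CQ:
 *
 *	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
 *	...
 *	chp = get_chp(rhp, cqid);
 *	...
 *	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
 */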
struct c4iw_pd {
	struct ib_pd ibpd;
	u32 pdid;
	struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct c4iw_pd, ibpd);
}
struct tpt_attributes {
	u64 len;
	u64 va_fbo;
	enum fw_ri_mem_perms perms;
	u32 stag;
	u32 pdid;
	u32 qpid;
	u32 pbl_size;
	u32 pbl_addr;
	u32 state:1;
	u32 type:2;
	u32 rsvd:1;
	u32 remote_invaliate_disable:1;
	u32 zbva:1;
	u32 mw_bind_enable:1;
	u32 page_size:5;
};
struct c4iw_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
	struct ib_mw ibmw;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct c4iw_mw, ibmw);
}
struct c4iw_fr_page_list {
	struct ib_fast_reg_page_list ibpl;
	DECLARE_PCI_UNMAP_ADDR(mapping);
	dma_addr_t dma_addr;
	struct c4iw_dev *dev;
	int size;
};

static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
					struct ib_fast_reg_page_list *ibpl)
{
	return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
}
struct c4iw_cq {
	struct ib_cq ibcq;
	struct c4iw_dev *rhp;
	struct t4_cq cq;
	spinlock_t lock;
	spinlock_t comp_handler_lock;
	atomic_t refcnt;
	wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct c4iw_cq, ibcq);
}
struct c4iw_mpa_attributes {
	u8 initiator;
	u8 recv_marker_enabled;
	u8 xmit_marker_enabled;
	u8 crc_enabled;
	u8 enhanced_rdma_conn;
	u8 version;
	u8 p2p_type;
};
struct c4iw_qp_attributes {
	u32 scq;
	u32 rcq;
	u32 sq_num_entries;
	u32 rq_num_entries;
	u32 sq_max_sges;
	u32 sq_max_sges_rdma_write;
	u32 rq_max_sges;
	u32 state;
	u8 enable_rdma_read;
	u8 enable_rdma_write;
	u8 enable_bind;
	u8 enable_mmid0_fastreg;
	u32 max_ord;
	u32 max_ird;
	u32 pd;
	u32 next_state;
	char terminate_buffer[52];
	u32 terminate_msg_len;
	u8 is_terminate_local;
	struct c4iw_mpa_attributes mpa_attr;
	struct c4iw_ep *llp_stream_handle;
	u8 layer_etype;
	u8 ecode;
	u16 sq_db_inc;
	u16 rq_db_inc;
};
struct c4iw_qp {
	struct ib_qp ibqp;
	struct c4iw_dev *rhp;
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attr;
	struct t4_wq wq;
	spinlock_t lock;
	struct mutex mutex;
	atomic_t refcnt;
	wait_queue_head_t wait;
	struct timer_list timer;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct c4iw_qp, ibqp);
}
struct c4iw_ucontext {
	struct ib_ucontext ibucontext;
	struct c4iw_dev_ucontext uctx;
	u32 key;
	spinlock_t mmap_lock;
	struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
	return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
	struct list_head entry;
	u64 addr;
	u32 key;
	unsigned len;
};
static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
						u32 key, unsigned len)
{
	struct list_head *pos, *nxt;
	struct c4iw_mm_entry *mm;

	spin_lock(&ucontext->mmap_lock);
	list_for_each_safe(pos, nxt, &ucontext->mmaps) {
		mm = list_entry(pos, struct c4iw_mm_entry, entry);
		if (mm->key == key && mm->len == len) {
			list_del_init(&mm->entry);
			spin_unlock(&ucontext->mmap_lock);
			CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d",
			    __func__, key, (unsigned long long)mm->addr,
			    mm->len);
			return mm;
		}
	}
	spin_unlock(&ucontext->mmap_lock);
	return NULL;
}
static inline void insert_mmap(struct c4iw_ucontext *ucontext,
			       struct c4iw_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d", __func__, mm->key,
	    (unsigned long long)mm->addr, mm->len);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}
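
/*
 * Usage sketch (illustrative only): a create-verb path stashes the physical
 * address of queue memory under a key handed back to userspace; the driver's
 * mmap handler later claims it with remove_mmap().  The uresp field below is
 * hypothetical.
 *
 *	mm->key = uresp.key;
 *	mm->addr = vtophys(chp->cq.queue);
 *	mm->len = chp->cq.memsize;
 *	insert_mmap(ucontext, mm);
 *
 * and in the mmap path:
 *
 *	mm = remove_mmap(ucontext, key, len);
 */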
enum c4iw_qp_attr_mask {
	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
	C4IW_QP_ATTR_SQ_DB = 1 << 1,
	C4IW_QP_ATTR_RQ_DB = 1 << 2,
	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	C4IW_QP_ATTR_MAX_ORD = 1 << 11,
	C4IW_QP_ATTR_MAX_IRD = 1 << 12,
	C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
	C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
				     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
				     C4IW_QP_ATTR_MAX_ORD |
				     C4IW_QP_ATTR_MAX_IRD |
				     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
				     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
				     C4IW_QP_ATTR_MPA_ATTR |
				     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};
int c4iw_modify_qp(struct c4iw_dev *rhp,
				struct c4iw_qp *qhp,
				enum c4iw_qp_attr_mask mask,
				struct c4iw_qp_attributes *attrs,
				int internal);
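
/*
 * For example (illustrative only), an error path moves a QP to ERROR by
 * setting only the next-state attribute and masking in nothing else:
 *
 *	struct c4iw_qp_attributes attrs;
 *
 *	attrs.next_state = C4IW_QP_STATE_ERROR;
 *	c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 */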
enum c4iw_qp_state {
	C4IW_QP_STATE_IDLE,
	C4IW_QP_STATE_RTS,
	C4IW_QP_STATE_ERROR,
	C4IW_QP_STATE_TERMINATE,
	C4IW_QP_STATE_CLOSING,
	C4IW_QP_STATE_TOT
};
static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
	case IB_QPS_INIT:
		return C4IW_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return C4IW_QP_STATE_RTS;
	case IB_QPS_SQD:
		return C4IW_QP_STATE_CLOSING;
	case IB_QPS_SQE:
		return C4IW_QP_STATE_TERMINATE;
	case IB_QPS_ERR:
		return C4IW_QP_STATE_ERROR;
	default:
		return -1;
	}
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
	switch (c4iw_qp_state) {
	case C4IW_QP_STATE_IDLE:
		return IB_QPS_INIT;
	case C4IW_QP_STATE_RTS:
		return IB_QPS_RTS;
	case C4IW_QP_STATE_CLOSING:
		return IB_QPS_SQD;
	case C4IW_QP_STATE_TERMINATE:
		return IB_QPS_SQE;
	case C4IW_QP_STATE_ERROR:
		return IB_QPS_ERR;
	}
	return IB_QPS_ERR;
}
static inline u32 c4iw_ib_to_tpt_access(int a)
{
	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
	       (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
	       FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}
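
/*
 * Worked example: registering an MR with IB_ACCESS_LOCAL_WRITE |
 * IB_ACCESS_REMOTE_WRITE yields FW_RI_MEM_ACCESS_LOCAL_WRITE |
 * FW_RI_MEM_ACCESS_REM_WRITE | FW_RI_MEM_ACCESS_LOCAL_READ in the TPT
 * entry; local read permission is always granted.
 */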
enum c4iw_mmid_state {
	C4IW_STAG_STATE_VALID,
	C4IW_STAG_STATE_INVALID
};
#define C4IW_NODE_DESC "iw_cxgbe Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA	256
#define MPA_ENHANCED_RDMA_CONN	0x10
#define MPA_REJECT		0x20
#define MPA_CRC			0x40
#define MPA_MARKERS		0x80
#define MPA_FLAGS_MASK		0xE0

#define MPA_V2_PEER2PEER_MODEL		0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR	0x4000
#define MPA_V2_RDMA_WRITE_RTR		0x8000
#define MPA_V2_RDMA_READ_RTR		0x4000
#define MPA_V2_IRD_ORD_MASK		0x3FFF
/* FIXME: use atomic_read() on kref.count, as Linux does. */
#define c4iw_put_ep(ep) { \
	CTR4(KTR_IW_CXGBE, "put_ep (%s:%u) ep %p, refcnt %d", \
	     __func__, __LINE__, ep, (ep)->kref.count); \
	WARN_ON((ep)->kref.count < 1); \
	kref_put(&((ep)->kref), _c4iw_free_ep); \
}

/* FIXME: use atomic_read() on kref.count, as Linux does. */
#define c4iw_get_ep(ep) { \
	CTR4(KTR_IW_CXGBE, "get_ep (%s:%u) ep %p, refcnt %d", \
	     __func__, __LINE__, ep, (ep)->kref.count); \
	kref_get(&((ep)->kref)); \
}

void _c4iw_free_ep(struct kref *kref);
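
/*
 * Usage sketch (illustrative only): take a reference before handing the ep
 * to deferred work, and drop it when the work completes; the final
 * c4iw_put_ep() releases the ep through _c4iw_free_ep().
 *
 *	c4iw_get_ep(&ep->com);
 *	... queue ep to a timer or task ...
 *	c4iw_put_ep(&ep->com);
 */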
struct mpa_message {
	u8 key[16];
	u8 flags;
	u8 revision;
	__be16 private_data_size;
	u8 private_data[0];
};

struct mpa_v2_conn_params {
	__be16 ird;
	__be16 ord;
};

struct terminate_message {
	u8 layer_etype;
	u8 ecode;
	__be16 hdrct_rsvd;
	u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
enum c4iw_layers_types {
	LAYER_RDMAP		= 0x00,
	LAYER_DDP		= 0x10,
	LAYER_MPA		= 0x20,
	RDMAP_LOCAL_CATA	= 0x00,
	RDMAP_REMOTE_PROT	= 0x01,
	RDMAP_REMOTE_OP		= 0x02,
	DDP_LOCAL_CATA		= 0x00,
	DDP_TAGGED_ERR		= 0x01,
	DDP_UNTAGGED_ERR	= 0x02,
	DDP_LLP			= 0x03
};

enum c4iw_rdma_ecodes {
	RDMAP_INV_STAG		= 0x00,
	RDMAP_BASE_BOUNDS	= 0x01,
	RDMAP_ACC_VIOL		= 0x02,
	RDMAP_STAG_NOT_ASSOC	= 0x03,
	RDMAP_TO_WRAP		= 0x04,
	RDMAP_INV_VERS		= 0x05,
	RDMAP_INV_OPCODE	= 0x06,
	RDMAP_STREAM_CATA	= 0x07,
	RDMAP_GLOBAL_CATA	= 0x08,
	RDMAP_CANT_INV_STAG	= 0x09,
	RDMAP_UNSPECIFIED	= 0xff
};

enum c4iw_ddp_ecodes {
	DDPT_INV_STAG		= 0x00,
	DDPT_BASE_BOUNDS	= 0x01,
	DDPT_STAG_NOT_ASSOC	= 0x02,
	DDPT_TO_WRAP		= 0x03,
	DDPT_INV_VERS		= 0x04,
	DDPU_INV_QN		= 0x01,
	DDPU_INV_MSN_NOBUF	= 0x02,
	DDPU_INV_MSN_RANGE	= 0x03,
	DDPU_INV_MO		= 0x04,
	DDPU_MSG_TOOBIG		= 0x05,
	DDPU_INV_VERS		= 0x06
};

enum c4iw_mpa_ecodes {
	MPA_CRC_ERR		= 0x02,
	MPA_MARKER_ERR		= 0x03,
	MPA_LOCAL_CATA		= 0x05,
	MPA_INSUFF_IRD		= 0x06,
	MPA_NOMATCH_RTR		= 0x07,
};
enum c4iw_ep_state {
	IDLE = 0,
	LISTEN,
	CONNECTING,
	MPA_REQ_WAIT,
	MPA_REQ_SENT,
	MPA_REQ_RCVD,
	MPA_REP_SENT,
	FPDU_MODE,
	ABORTING,
	CLOSING,
	MORIBUND,
	DEAD,
};

enum c4iw_ep_flags {
	PEER_ABORT_IN_PROGRESS	= 0,
	ABORT_REQ_IN_PROGRESS	= 1,
	RELEASE_RESOURCES	= 2,
	CLOSE_SENT		= 3,
	TIMEOUT			= 4
};

enum c4iw_ep_history {
	ACT_OPEN_REQ = 0,
	ACT_OFLD_CONN = 1,
	ACT_OPEN_RPL = 2,
	ACT_ESTAB = 3,
	PASS_ACCEPT_REQ = 4,
	PASS_ESTAB = 5,
	ABORT_UPCALL = 6,
	ESTAB_UPCALL = 7,
	CLOSE_UPCALL = 8,
	ULP_ACCEPT = 9,
	ULP_REJECT = 10,
	TIMEDOUT = 11,
	PEER_ABORT = 12,
	PEER_CLOSE = 13,
	CONNREQ_UPCALL = 14,
	ABORT_CONN = 15,
	DISCONN_UPCALL = 16,
	EP_DISC_CLOSE = 17,
	EP_DISC_ABORT = 18,
	CONN_RPL_UPCALL = 19,
	ACT_RETRY_NOMEM = 20,
	ACT_RETRY_INUSE = 21
};
struct c4iw_ep_common {
	TAILQ_ENTRY(c4iw_ep_common) entry;	/* Work queue attachment */
	struct iw_cm_id *cm_id;
	struct c4iw_qp *qp;
	struct c4iw_dev *dev;
	enum c4iw_ep_state state;
	struct kref kref;
	struct mutex mutex;
	struct sockaddr_in local_addr;
	struct sockaddr_in remote_addr;
	struct c4iw_wr_wait wr_wait;
	unsigned long flags;
	unsigned long history;
	int rpl_err;
	int rpl_done;
	struct thread *thread;
	struct socket *so;
};

struct c4iw_listen_ep {
	struct c4iw_ep_common com;
	unsigned int stid;
	int backlog;
};
struct c4iw_ep {
	struct c4iw_ep_common com;
	struct c4iw_ep *parent_ep;
	struct timer_list timer;
	struct list_head entry;
	unsigned int atid;
	u32 hwtid;
	u32 snd_seq;
	u32 rcv_seq;
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct c4iw_mpa_attributes mpa_attr;
	u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
	unsigned int mpa_pkt_len;
	u32 ird;
	u32 ord;
	u32 smac_idx;
	u32 tx_chan;
	u32 mtu;
	u16 mss;
	u16 emss;
	u16 plen;
	u16 rss_qid;
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 retry_with_mpa_v1;
	u8 tried_with_mpa_v1;
};
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}
static inline int compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}
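
/*
 * Worked example: for a 256KB (262144-byte) receive window,
 * compute_wscale(262144) returns 3, since 65535 << 2 == 262140 is still
 * smaller than the window but 65535 << 3 is not.
 */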
u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
			u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct mbuf *m);
int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void __exit c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
				struct ib_device *device,
				int page_list_len);
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata,
			       int mr_id);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf,
				     int acc,
				     u64 *iova_start);
int c4iw_reregister_phys_mem(struct ib_mr *mr,
			     struct ib_pd *pd,
			     int mr_rereg_mask,
			     struct ib_phys_buf *buffer_list,
			     int num_phys_buf,
			     int acc, u64 *iova_start);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct mbuf *m);
void c4iw_flush_hw_cq(struct t4_cq *cq);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_ev_handler(struct sge_iq *, const struct rsp_ctrl *);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;
#include <sys/blist.h>
struct gen_pool {
	blist_t gen_list;
	daddr_t gen_base;
	int gen_chunk_shift;
	struct mutex gen_lock;
};

static __inline struct gen_pool *
gen_pool_create(daddr_t base, u_int chunk_shift, u_int len)
{
	struct gen_pool *gp;

	gp = malloc(sizeof(struct gen_pool), M_DEVBUF, M_NOWAIT);
	if (gp == NULL)
		return (NULL);

	memset(gp, 0, sizeof(struct gen_pool));
	gp->gen_list = blist_create(len >> chunk_shift, M_NOWAIT);
	if (gp->gen_list == NULL) {
		free(gp, M_DEVBUF);
		return (NULL);
	}
	blist_free(gp->gen_list, 0, len >> chunk_shift);
	gp->gen_base = base;
	gp->gen_chunk_shift = chunk_shift;
	//mutex_init(&gp->gen_lock, "genpool", NULL, MTX_DUPOK|MTX_DEF);
	mutex_init(&gp->gen_lock);

	return (gp);
}
static __inline unsigned long
gen_pool_alloc(struct gen_pool *gp, int size)
{
	int chunks;
	daddr_t blkno;

	chunks = (size + (1 << gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift;
	mutex_lock(&gp->gen_lock);
	blkno = blist_alloc(gp->gen_list, chunks);
	mutex_unlock(&gp->gen_lock);

	if (blkno == SWAPBLK_NONE)
		return (0);

	return (gp->gen_base + ((1 << gp->gen_chunk_shift) * blkno));
}

static __inline void
gen_pool_free(struct gen_pool *gp, daddr_t address, int size)
{
	int chunks;
	daddr_t blkno;

	chunks = (size + (1 << gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift;
	blkno = (address - gp->gen_base) / (1 << gp->gen_chunk_shift);
	mutex_lock(&gp->gen_lock);
	blist_free(gp->gen_list, blkno, chunks);
	mutex_unlock(&gp->gen_lock);
}

static __inline void
gen_pool_destroy(struct gen_pool *gp)
{
	blist_destroy(gp->gen_list);
	free(gp, M_DEVBUF);
}
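
/*
 * Usage sketch (illustrative only): the PBL and RQT pools are gen_pools
 * carved out of the adapter's memory ranges.  MIN_PBL_SHIFT below is
 * hypothetical here; the real constant lives with the pool setup code.
 *
 *	rdev->pbl_pool = gen_pool_create(rdev->adap->vres.pbl.start,
 *	    MIN_PBL_SHIFT, rdev->adap->vres.pbl.size);
 *	addr = gen_pool_alloc(rdev->pbl_pool, size);
 *	...
 *	gen_pool_free(rdev->pbl_pool, addr, size);
 *	gen_pool_destroy(rdev->pbl_pool);
 */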
#if defined(__i386__) || defined(__amd64__)
#define L1_CACHE_BYTES 128
#else
#define L1_CACHE_BYTES 32
#endif
static inline
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
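
/*
 * Usage sketch (illustrative only): count every QP published in the
 * device's qpidr; the callback name is hypothetical.
 *
 *	static int count_qps(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	idr_for_each(&rhp->qpidr, count_qps, &n);
 */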
void c4iw_cm_init_cpl(struct adapter *);
void c4iw_cm_term_cpl(struct adapter *);

void your_reg_device(struct c4iw_dev *dev);

#define SGE_CTRLQ_NUM	0

extern int spg_creds;	/* Status page size in credit units (1 unit = 64B) */

#endif /* __IW_CXGB4_H__ */