4 #define LINUXKPI_PARAM_PREFIX ib_sdp_
10 #include <sys/param.h>
11 #include <sys/systm.h>
12 #include <sys/malloc.h>
13 #include <sys/kernel.h>
14 #include <sys/sysctl.h>
17 #include <sys/rwlock.h>
18 #include <sys/socket.h>
19 #include <sys/socketvar.h>
20 #include <sys/protosw.h>
23 #include <sys/domain.h>
30 #include <net/if_var.h>
31 #include <net/route.h>
34 #include <netinet/in.h>
35 #include <netinet/in_systm.h>
36 #include <netinet/in_var.h>
37 #include <netinet/in_pcb.h>
38 #include <netinet/tcp.h>
39 #include <netinet/tcp_fsm.h>
40 #include <netinet/tcp_timer.h>
41 #include <netinet/tcp_var.h>
43 #include <linux/device.h>
44 #include <linux/err.h>
45 #include <linux/sched.h>
46 #include <linux/workqueue.h>
47 #include <linux/wait.h>
48 #include <linux/module.h>
49 #include <linux/moduleparam.h>
50 #include <linux/pci.h>
52 #include <rdma/ib_verbs.h>
53 #include <rdma/rdma_cm.h>
54 #include <rdma/ib_cm.h>
55 #include <rdma/sdp_socket.h>
56 #include <rdma/ib_fmr_pool.h>
59 #define CONFIG_INFINIBAND_SDP_DEBUG
65 /* From sys/queue.h */
66 #define LIST_HEAD(name, type) \
68 struct type *lh_first; /* first element */ \
71 /* Interval between successive polls in the Tx routine when polling is used
72 instead of interrupts (in per-core Tx rings) - should be power of 2 */
73 #define SDP_TX_POLL_MODER 16
74 #define SDP_TX_POLL_TIMEOUT (HZ / 20)
75 #define SDP_NAGLE_TIMEOUT (HZ / 10)
77 #define SDP_SRCAVAIL_CANCEL_TIMEOUT (HZ * 5)
78 #define SDP_SRCAVAIL_ADV_TIMEOUT (1 * HZ)
79 #define SDP_SRCAVAIL_PAYLOAD_LEN 1
81 #define SDP_RESOLVE_TIMEOUT 1000
82 #define SDP_ROUTE_TIMEOUT 1000
83 #define SDP_RETRY_COUNT 5
84 #define SDP_KEEPALIVE_TIME (120 * 60 * HZ)
85 #define SDP_FIN_WAIT_TIMEOUT (60 * HZ) /* like TCP_FIN_TIMEOUT */
87 #define SDP_TX_SIZE 0x40
88 #define SDP_RX_SIZE 0x40
90 #define SDP_FMR_SIZE (MIN(0x1000, PAGE_SIZE) / sizeof(u64))
91 #define SDP_FMR_POOL_SIZE 1024
92 #define SDP_FMR_DIRTY_SIZE ( SDP_FMR_POOL_SIZE / 4 )
94 #define SDP_MAX_RDMA_READ_LEN (PAGE_SIZE * (SDP_FMR_SIZE - 2))
96 /* mb inlined data len - rest will be rx'ed into frags */
97 #define SDP_HEAD_SIZE (sizeof(struct sdp_bsdh))
99 /* Limit the tx payload length: even when the sink advertises buffers
101  * larger than the source can fill, payload per packet is capped by
101  * SDP_MAX_PACKET and by the rx fragment size (limited by sge->length). */
102 #define SDP_MAX_PACKET (1 << 16)
103 #define SDP_MAX_PAYLOAD (SDP_MAX_PACKET - SDP_HEAD_SIZE)
105 #define SDP_MAX_RECV_SGES (SDP_MAX_PACKET / MCLBYTES)
106 #define SDP_MAX_SEND_SGES (SDP_MAX_PACKET / MCLBYTES) + 2
110 #define SDP_DEF_ZCOPY_THRESH 64*1024
111 #define SDP_MIN_ZCOPY_THRESH PAGE_SIZE
112 #define SDP_MAX_ZCOPY_THRESH 1048576
114 #define SDP_OP_RECV 0x800000000LL
115 #define SDP_OP_SEND 0x400000000LL
116 #define SDP_OP_RDMA 0x200000000LL
117 #define SDP_OP_NOP 0x100000000LL
119 /* how long (in jiffies) to block sender till tx completion*/
120 #define SDP_BZCOPY_POLL_TIMEOUT (HZ / 10)
122 #define SDP_AUTO_CONF 0xffff
123 #define AUTO_MOD_DELAY (HZ / 4)
/* Per-mbuf SDP control-block fields (the enclosing struct sdp_mb_cb
 * declaration opener was lost in this extraction -- see SDP_SKB_CB
 * below, which overlays this struct on the mbuf's cb[] storage). */
126 __u32 seq; /* Starting sequence number */
127 struct bzcopy_state *bz;
128 struct rx_srcavail_state *rx_sa;
129 struct tx_srcavail_state *tx_sa;
/* Protocol mbuf flags borrowed from the generic M_PROTOx namespace. */
132 #define M_PUSH M_PROTO1 /* Do a 'push'. */
133 #define M_URG M_PROTO2 /* Mark as urgent (oob). */
/* Accessors for the sdp_mb_cb overlaid on an mbuf's cb[] area.
 * NOTE(review): cb[] is not a standard FreeBSD mbuf field -- presumably a
 * Linux-compat shim declared elsewhere; confirm against the full file. */
135 #define SDP_SKB_CB(__mb) ((struct sdp_mb_cb *)&((__mb)->cb[0]))
136 #define BZCOPY_STATE(mb) (SDP_SKB_CB(mb)->bz)
137 #define RX_SRCAVAIL_STATE(mb) (SDP_SKB_CB(mb)->rx_sa)
138 #define TX_SRCAVAIL_STATE(mb) (SDP_SKB_CB(mb)->tx_sa)
141 #define MIN(a, b) (a < b ? a : b)
/* Ring index accessors: head and tail are atomics; "posted" is the count
 * of work requests currently outstanding (head - tail). */
144 #define ring_head(ring) (atomic_read(&(ring).head))
145 #define ring_tail(ring) (atomic_read(&(ring).tail))
146 #define ring_posted(ring) (ring_head(ring) - ring_tail(ring))
148 #define rx_ring_posted(ssk) ring_posted(ssk->rx_ring)
/* NOTE(review): tx_ring_posted appears twice below.  In the upstream
 * source the two definitions are selected by an #ifdef (zero-copy RDMA
 * accounting vs. plain) that was lost in this extraction -- as written,
 * the second definition silently redefines the first.  Confirm against
 * the original file before relying on either. */
150 #define tx_ring_posted(ssk) (ring_posted(ssk->tx_ring) + \
151 (ssk->tx_ring.rdma_inflight ? ssk->tx_ring.rdma_inflight->busy : 0))
153 #define tx_ring_posted(ssk) ring_posted(ssk->tx_ring)
156 extern int sdp_zcopy_thresh;
157 extern int rcvbuf_initial_size;
158 extern struct workqueue_struct *rx_comp_wq;
159 extern struct ib_client sdp_client;
/* SDP BSDH message IDs -- on-the-wire values carried in the bsdh->mid
 * byte (enum opener and several members were lost in this extraction;
 * mid2str() below lists the full set). */
163 SDP_MID_HELLO_ACK = 0x1,
164 SDP_MID_DISCONN = 0x2,
166 SDP_MID_SENDSM = 0x4,
167 SDP_MID_RDMARDCOMPL = 0x6,
168 SDP_MID_SRCAVAIL_CANCEL = 0x8,
169 SDP_MID_CHRCVBUF = 0xB,
170 SDP_MID_CHRCVBUF_ACK = 0xC,
171 SDP_MID_SINKAVAIL = 0xFD,
172 SDP_MID_SRCAVAIL = 0xFE,
/* Out-of-band (urgent) data state bits carried in the BSDH flags. */
177 SDP_OOB_PRES = 1 << 0,
178 SDP_OOB_PEND = 1 << 1,
/* Credits below this floor are reserved for control messages. */
182 SDP_MIN_TX_CREDITS = 2
199 } __attribute__((__packed__));
207 } __attribute__((__packed__));
209 /* TODO: too much? Can I avoid having the src/dst and port here? */
211 struct sdp_bsdh bsdh;
220 union cma_ip_addr src_addr;
221 union cma_ip_addr dst_addr;
222 u8 rsvd3[IB_CM_REQ_PRIVATE_DATA_SIZE - sizeof(struct sdp_bsdh) - 48];
223 } __attribute__((__packed__));
226 struct sdp_bsdh bsdh;
232 u8 rsvd2[IB_CM_REP_PRIVATE_DATA_SIZE - sizeof(struct sdp_bsdh) - 8];
233 } __attribute__((__packed__));
237 } __attribute__((__packed__));
243 } __attribute__((__packed__));
247 u64 mapping[SDP_MAX_SEND_SGES];
248 } __attribute__((__packed__));
250 struct sdp_chrecvbuf {
252 } __attribute__((__packed__));
254 /* Context used for synchronous zero copy bcopy (BZCOPY) */
255 struct bzcopy_state {
256 unsigned char __user *u_base;
263 struct sdp_sock *ssk;
273 TX_SA_CROSS_SEND = 0x02,
274 TX_SA_INTRRUPTED = 0x04,
275 TX_SA_TIMEDOUT = 0x08,
279 struct rx_srcavail_state {
280 /* Advertised buffer stuff */
289 struct ib_umem *umem;
290 struct ib_pool_fmr *fmr;
294 enum rx_sa_flag flags;
297 struct tx_srcavail_state {
298 /* Data below 'busy' will be reset */
301 struct ib_umem *umem;
302 struct ib_pool_fmr *fmr;
307 enum tx_sa_flag abort_flags;
315 struct rx_srcavail_state *rdma_inflight;
317 struct sdp_buf *buffer;
323 #define tx_credits(ssk) (atomic_read(&ssk->tx_ring.credits))
325 struct callout timer;
330 struct sdp_buf *buffer;
336 struct rwlock destroyed_lock;
342 struct ib_fmr_pool *fmr_pool;
345 struct sdp_moderation {
346 unsigned long last_moder_packets;
347 unsigned long last_moder_tx_packets;
348 unsigned long last_moder_bytes;
349 unsigned long last_moder_jiffies;
359 u16 adaptive_rx_coal;
366 /* These are flags fields. */
367 #define SDP_TIMEWAIT 0x0001 /* In ssk timewait state. */
368 #define SDP_DROPPED 0x0002 /* Socket has been dropped. */
369 #define SDP_SOCKREF 0x0004 /* Holding a sockref for close. */
370 #define SDP_NODELAY 0x0008 /* Disble nagle. */
371 #define SDP_NEEDFIN 0x0010 /* Send a fin on the next tx. */
372 #define SDP_DREQWAIT 0x0020 /* Waiting on DREQ. */
373 #define SDP_DESTROY 0x0040 /* Being destroyed. */
374 #define SDP_DISCON 0x0080 /* rdma_disconnect is owed. */
376 /* These are oobflags */
377 #define SDP_HADOOB 0x0001 /* Had OOB data. */
378 #define SDP_HAVEOOB 0x0002 /* Have OOB data. */
381 LIST_ENTRY(sdp_sock) list;
382 struct socket *socket;
383 struct rdma_cm_id *id;
384 struct ib_device *ib_device;
385 struct sdp_device *sdp_dev;
388 struct callout keep2msl; /* 2msl and keepalive timer. */
389 struct callout nagle_timer; /* timeout waiting for ack */
390 struct ib_ucontext context;
396 int oobflags; /* protected by rx lock. */
399 int recv_bytes; /* Bytes per recv. buf including header */
403 struct sdp_rx_ring rx_ring;
404 struct sdp_tx_ring tx_ring;
406 struct mbuf *rx_ctl_q;
407 struct mbuf *rx_ctl_tail;
409 int qp_active; /* XXX Flag. */
411 struct work_struct rx_comp_work;
412 #define rcv_nxt(ssk) atomic_read(&(ssk->rcv_nxt))
417 #define mseq_ack(ssk) (atomic_read(&ssk->mseq_ack))
418 unsigned max_bufs; /* Initial buffers offered by other side */
419 unsigned min_bufs; /* Low water mark to wake senders */
421 unsigned long nagle_last_unacked; /* mseq of lastest unacked packet */
423 atomic_t remote_credits;
424 #define remote_credits(ssk) (atomic_read(&ssk->remote_credits))
428 int recv_request_head; /* mark the rx_head when the resize request
430 int recv_request; /* XXX flag if request to resize was received */
432 unsigned long tx_packets;
433 unsigned long rx_packets;
434 unsigned long tx_bytes;
435 unsigned long rx_bytes;
436 struct sdp_moderation auto_mod;
437 struct task shutdown_task;
439 struct tx_srcavail_state *tx_sa;
440 struct rx_srcavail_state *rx_sa;
441 spinlock_t tx_sa_lock;
442 struct delayed_work srcavail_cancel_work;
443 int srcavail_cancel_mseq;
444 /* ZCOPY data: -1:use global; 0:disable zcopy; >0: zcopy threshold */
/* Map a socket to its attached SDP protocol control block. */
449 #define sdp_sk(so) ((struct sdp_sock *)(so->so_pcb))
/* Reader/writer lock wrappers for the per-sdp_sock rwlock (ssk->lock),
 * following the usual FreeBSD pcb locking idiom. */
451 #define SDP_RLOCK(ssk) rw_rlock(&(ssk)->lock)
452 #define SDP_WLOCK(ssk) rw_wlock(&(ssk)->lock)
453 #define SDP_RUNLOCK(ssk) rw_runlock(&(ssk)->lock)
454 #define SDP_WUNLOCK(ssk) rw_wunlock(&(ssk)->lock)
455 #define SDP_WLOCK_ASSERT(ssk) rw_assert(&(ssk)->lock, RA_WLOCKED)
456 #define SDP_RLOCK_ASSERT(ssk) rw_assert(&(ssk)->lock, RA_RLOCKED)
457 #define SDP_LOCK_ASSERT(ssk) rw_assert(&(ssk)->lock, RA_LOCKED)
/* Zero every tx_srcavail_state field from 'busy' to the end of the
 * struct; fields declared before 'busy' are preserved (matches the
 * "Data below 'busy' will be reset" note on the struct above).
 * NOTE(review): the function's braces were lost in this extraction. */
459 static inline void tx_sa_reset(struct tx_srcavail_state *tx_sa)
461 memset((void *)&tx_sa->busy, 0,
462 sizeof(*tx_sa) - offsetof(typeof(*tx_sa), busy));
/* Drop the read lock taken by a successful rx_ring_trylock(). */
465 static inline void rx_ring_unlock(struct sdp_rx_ring *rx_ring)
467 rw_runlock(&rx_ring->destroyed_lock);
/* Take the rx ring's destroyed_lock for reading; if the ring has already
 * been marked destroyed, release the lock again so the caller must not
 * touch the ring.  NOTE(review): the return statements (success/failure
 * values) were lost in this extraction -- confirm in the original file. */
470 static inline int rx_ring_trylock(struct sdp_rx_ring *rx_ring)
472 rw_rlock(&rx_ring->destroyed_lock);
473 if (rx_ring->destroyed) {
474 rx_ring_unlock(rx_ring);
/* Mark the rx ring destroyed under the write lock, so all subsequent
 * rx_ring_trylock() attempts fail. */
480 static inline void rx_ring_destroy_lock(struct sdp_rx_ring *rx_ring)
482 rw_wlock(&rx_ring->destroyed_lock);
483 rx_ring->destroyed = 1;
484 rw_wunlock(&rx_ring->destroyed_lock);
/* Request a completion notification for the next RX completion on the
 * receive ring's CQ (re-arms the interrupt after polling). */
487 static inline void sdp_arm_rx_cq(struct sdp_sock *ssk)
489 sdp_prf(ssk->socket, NULL, "Arming RX cq");
490 sdp_dbg_data(ssk->socket, "Arming RX cq\n");
492 ib_req_notify_cq(ssk->rx_ring.cq, IB_CQ_NEXT_COMP);
/* Same for the send ring's CQ; logs current credit/posted counts. */
495 static inline void sdp_arm_tx_cq(struct sdp_sock *ssk)
497 sdp_prf(ssk->socket, NULL, "Arming TX cq");
498 sdp_dbg_data(ssk->socket, "Arming TX cq. credits: %d, posted: %d\n",
499 tx_credits(ssk), tx_ring_posted(ssk));
501 ib_req_notify_cq(ssk->tx_ring.cq, IB_CQ_NEXT_COMP);
504 /* return the min of:
506 * - free slots in tx_ring (not including SDP_MIN_TX_CREDITS
/* i.e. usable send slots = min(peer credits, ring space), minus the
 * SDP_MIN_TX_CREDITS reserved for control traffic; the early-out when
 * below the reserve (return 0, presumably) was elided here. */
508 static inline int tx_slots_free(struct sdp_sock *ssk)
512 min_free = MIN(tx_credits(ssk),
513 SDP_TX_SIZE - tx_ring_posted(ssk));
514 if (min_free < SDP_MIN_TX_CREDITS)
517 return min_free - SDP_MIN_TX_CREDITS;
/* Translate an SDP BSDH message id to its symbolic name for debug
 * output, via a value-indexed string table.  Out-of-range ids are
 * rejected by the ARRAY_SIZE bounds check (the returns were elided in
 * this extraction). */
521 static inline char *mid2str(int mid)
523 #define ENUM2STR(e) [e] = #e
524 static char *mid2str[] = {
525 ENUM2STR(SDP_MID_HELLO),
526 ENUM2STR(SDP_MID_HELLO_ACK),
527 ENUM2STR(SDP_MID_ABORT),
528 ENUM2STR(SDP_MID_DISCONN),
529 ENUM2STR(SDP_MID_SENDSM),
530 ENUM2STR(SDP_MID_RDMARDCOMPL),
531 ENUM2STR(SDP_MID_SRCAVAIL_CANCEL),
532 ENUM2STR(SDP_MID_CHRCVBUF),
533 ENUM2STR(SDP_MID_CHRCVBUF_ACK),
534 ENUM2STR(SDP_MID_DATA),
535 ENUM2STR(SDP_MID_SRCAVAIL),
536 ENUM2STR(SDP_MID_SINKAVAIL),
539 if (mid >= ARRAY_SIZE(mid2str))
/* Allocate an mbuf for an outgoing SDP message: get a packet header
 * mbuf, size it for the BSDH header, and point h at it.  The mid/size
 * fields are filled in on lines elided from this extraction. */
545 static inline struct mbuf *
546 sdp_alloc_mb(struct socket *sk, u8 mid, int size, int wait)
551 MGETHDR(mb, wait, MT_DATA);
554 mb->m_pkthdr.len = mb->m_len = sizeof(struct sdp_bsdh);
555 h = mtod(mb, struct sdp_bsdh *);
/* Plain data message (no extended header payload). */
560 static inline struct mbuf *
561 sdp_alloc_mb_data(struct socket *sk, int wait)
563 return sdp_alloc_mb(sk, SDP_MID_DATA, 0, wait);
/* Disconnect notification message. */
566 static inline struct mbuf *
567 sdp_alloc_mb_disconnect(struct socket *sk, int wait)
569 return sdp_alloc_mb(sk, SDP_MID_DISCONN, 0, wait);
/* Append len bytes to the mbuf and return a pointer to the new space
 * (skb_put analogue; return type and body largely elided here). */
573 mb_put(struct mbuf *mb, int len)
/* ChRcvBuf-Ack: acknowledge a receive-buffer resize with the new size
 * (network byte order) in a trailing sdp_chrecvbuf. */
583 static inline struct mbuf *
584 sdp_alloc_mb_chrcvbuf_ack(struct socket *sk, int size, int wait)
587 struct sdp_chrecvbuf *resp_size;
589 mb = sdp_alloc_mb(sk, SDP_MID_CHRCVBUF_ACK, sizeof(*resp_size), wait);
592 resp_size = (struct sdp_chrecvbuf *)mb_put(mb, sizeof *resp_size);
593 resp_size->size = htonl(size);
/* SrcAvail: advertise a source buffer for the peer to RDMA-read,
 * described by (len, rkey, vaddr) in an appended sdp_srcah. */
598 static inline struct mbuf *
599 sdp_alloc_mb_srcavail(struct socket *sk, u32 len, u32 rkey, u64 vaddr, int wait)
602 struct sdp_srcah *srcah;
604 mb = sdp_alloc_mb(sk, SDP_MID_SRCAVAIL, sizeof(*srcah), wait);
607 srcah = (struct sdp_srcah *)mb_put(mb, sizeof(*srcah));
608 srcah->len = htonl(len);
609 srcah->rkey = htonl(rkey);
610 srcah->vaddr = cpu_to_be64(vaddr);
/* Cancel a previously advertised SrcAvail. */
615 static inline struct mbuf *
616 sdp_alloc_mb_srcavail_cancel(struct socket *sk, int wait)
618 return sdp_alloc_mb(sk, SDP_MID_SRCAVAIL_CANCEL, 0, wait);
/* RdmaRdCompl: report how many bytes of an RDMA read completed. */
621 static inline struct mbuf *
622 sdp_alloc_mb_rdmardcompl(struct socket *sk, u32 len, int wait)
625 struct sdp_rrch *rrch;
627 mb = sdp_alloc_mb(sk, SDP_MID_RDMARDCOMPL, sizeof(*rrch), wait);
630 rrch = (struct sdp_rrch *)mb_put(mb, sizeof(*rrch));
631 rrch->len = htonl(len);
/* SendSm: tell the peer to abandon zero-copy and fall back to send. */
636 static inline struct mbuf *
637 sdp_alloc_mb_sendsm(struct socket *sk, int wait)
639 return sdp_alloc_mb(sk, SDP_MID_SENDSM, 0, wait);
/* Number of unposted slots remaining in the send ring. */
641 static inline int sdp_tx_ring_slots_left(struct sdp_sock *ssk)
643 return SDP_TX_SIZE - tx_ring_posted(ssk);
/* Decide whether to send the peer a credit update: only when our
 * advertised credits have dropped to the reserve floor, the peer has
 * fewer credits than we have rx buffers posted, and we actually have a
 * credit and a ring slot to send the update with. */
646 static inline int credit_update_needed(struct sdp_sock *ssk)
650 c = remote_credits(ssk);
651 if (likely(c > SDP_MIN_TX_CREDITS))
653 return unlikely(c < rx_ring_posted(ssk)) &&
654 likely(tx_credits(ssk) > 0) &&
655 likely(sdp_tx_ring_slots_left(ssk));
/* Statistics collection compiled out: these expand to nothing
 * (presumably the instrumented variants live under an elided #ifdef
 * such as SDP_PROFILING -- confirm against the original file). */
659 #define SDPSTATS_COUNTER_INC(stat)
660 #define SDPSTATS_COUNTER_ADD(stat, val)
661 #define SDPSTATS_COUNTER_MID_INC(stat, mid)
662 #define SDPSTATS_HIST_LINEAR(stat, size)
663 #define SDPSTATS_HIST(stat, size)
/* Undo the DMA mappings recorded for a posted buffer: walk the buffer's
 * mbuf chain and unmap each segment's entry from sbuf->mapping[] in the
 * given DMA direction. */
666 sdp_cleanup_sdp_buf(struct sdp_sock *ssk, struct sdp_buf *sbuf,
667 enum dma_data_direction dir)
669 struct ib_device *dev;
673 dev = ssk->ib_device;
674 for (i = 0, mb = sbuf->mb; mb != NULL; mb = mb->m_next, i++)
675 ib_dma_unmap_single(dev, sbuf->mapping[i], mb->m_len, dir);
679 void sdp_set_default_moderation(struct sdp_sock *ssk);
680 void sdp_start_keepalive_timer(struct socket *sk);
681 void sdp_urg(struct sdp_sock *ssk, struct mbuf *mb);
682 void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk);
683 void sdp_abort(struct socket *sk);
684 struct sdp_sock *sdp_notify(struct sdp_sock *ssk, int error);
688 int sdp_cma_handler(struct rdma_cm_id *, struct rdma_cm_event *);
691 int sdp_tx_ring_create(struct sdp_sock *ssk, struct ib_device *device);
692 void sdp_tx_ring_destroy(struct sdp_sock *ssk);
693 int sdp_xmit_poll(struct sdp_sock *ssk, int force);
694 void sdp_post_send(struct sdp_sock *ssk, struct mbuf *mb);
695 void sdp_post_sends(struct sdp_sock *ssk, int wait);
696 void sdp_post_keepalive(struct sdp_sock *ssk);
699 void sdp_rx_ring_init(struct sdp_sock *ssk);
700 int sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device);
701 void sdp_rx_ring_destroy(struct sdp_sock *ssk);
702 int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size);
703 int sdp_init_buffers(struct sdp_sock *ssk, u32 new_size);
704 void sdp_do_posts(struct sdp_sock *ssk);
705 void sdp_rx_comp_full(struct sdp_sock *ssk);
709 int sdp_sendmsg_zcopy(struct kiocb *iocb, struct socket *sk, struct iovec *iov);
710 int sdp_handle_srcavail(struct sdp_sock *ssk, struct sdp_srcah *srcah);
711 void sdp_handle_sendsm(struct sdp_sock *ssk, u32 mseq_ack);
712 void sdp_handle_rdma_read_compl(struct sdp_sock *ssk, u32 mseq_ack,
713 u32 bytes_completed);
714 int sdp_handle_rdma_read_cqe(struct sdp_sock *ssk);
715 int sdp_rdma_to_iovec(struct socket *sk, struct iovec *iov, struct mbuf *mb,
716 unsigned long *used);
717 int sdp_post_rdma_rd_compl(struct sdp_sock *ssk,
718 struct rx_srcavail_state *rx_sa);
719 int sdp_post_sendsm(struct socket *sk);
720 void srcavail_cancel_timeout(struct work_struct *work);
721 void sdp_abort_srcavail(struct socket *sk);
722 void sdp_abort_rdma_read(struct socket *sk);
723 int sdp_process_rx(struct sdp_sock *ssk);