2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2006-2009 Open Grid Computing, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include <linux/module.h>
38 #include <linux/moduleparam.h>
39 #include <linux/init.h>
40 #include <linux/slab.h>
41 #include <linux/err.h>
42 #include <linux/string.h>
43 #include <linux/inet.h>
44 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/pci.h>
48 #include <linux/sched.h>
49 #include <asm/system.h>
51 #include <asm/atomic.h>
53 #include <rdma/ib_verbs.h>
54 #include <rdma/rdma_cm.h>
59 extern int krping_debug;
60 #define DEBUG_LOG(cb, x...) if (krping_debug) krping_printf((cb)->cookie, x)
61 #define PRINTF(cb, x...) krping_printf((cb)->cookie, x)
/* Linux-compat module metadata; dual licensed per the header above. */
63 MODULE_AUTHOR("Steve Wise");
64 MODULE_DESCRIPTION("RDMA ping client/server");
65 MODULE_LICENSE("Dual BSD/GPL");
/*
 * Read the x86 TSC: rdtsc leaves the low 32 bits in EAX and the high 32
 * in EDX; combine them into a 64-bit cycle count.  NOTE(review): the
 * function name/prototype line is not visible in this view.
 */
67 static __inline uint64_t
71 __asm __volatile("rdtsc" : "=a" (low), "=d" (high));
72 return (low | ((u_int64_t)high << 32));
/* Cycle-counter type used by the latency/bandwidth tests below. */
75 typedef uint64_t cycles_t;
/*
 * Option table for the krping command string parser: long option name,
 * argument kind (int / string / no parameter), and the single-character
 * token the parser hands back.
 */
84 static const struct krping_option krping_opts[] = {
85 {"count", OPT_INT, 'C'},
86 {"size", OPT_INT, 'S'},
87 {"addr", OPT_STRING, 'a'},
88 {"port", OPT_INT, 'p'},
89 {"verbose", OPT_NOPARAM, 'v'},
90 {"validate", OPT_NOPARAM, 'V'},
91 {"server", OPT_NOPARAM, 's'},
92 {"client", OPT_NOPARAM, 'c'},
93 {"mem_mode", OPT_STRING, 'm'},
94 {"server_inv", OPT_NOPARAM, 'I'},
95 {"wlat", OPT_NOPARAM, 'l'},
96 {"rlat", OPT_NOPARAM, 'L'},
97 {"bw", OPT_NOPARAM, 'B'},
98 {"duplex", OPT_NOPARAM, 'd'},
99 {"txdepth", OPT_INT, 'T'},
100 {"poll", OPT_NOPARAM, 'P'},
101 {"local_dma_lkey", OPT_NOPARAM, 'Z'},
102 {"read_inv", OPT_NOPARAM, 'R'},
103 {"fr", OPT_NOPARAM, 'f'},
/*
 * 64-bit byte-order helpers for the rkey/addr/len advertisement.
 * cpu_to_be64()/be64_to_cpu() are each other's inverse (a byte swap on
 * little-endian, identity on big-endian), so this is behaviorally
 * identical to the old definition but uses the conversion matching
 * each macro's name.
 */
107 #define htonll(x) cpu_to_be64((x))
108 #define ntohll(x) be64_to_cpu((x))
/* Serializes access to the global krping_cbs list below. */
110 static struct mutex krping_mutex;
113 * List of running krping threads.
/* One entry per active control block, linked via krping_cb.list. */
115 static LIST_HEAD(krping_cbs);
118 * krping "ping/pong" loop:
119 * client sends source rkey/addr/len
120 * server receives source rkey/addr/len
121 * server rdma reads "ping" data from source
122 * server sends "go ahead" on rdma read completion
123 * client sends sink rkey/addr/len
124 * server receives sink rkey/addr/len
125 * server rdma writes "pong" data to sink
126 * server sends "go ahead" on rdma write completion
131 * These states are used to signal events between the completion handler
132 * and the main client or server thread.
134 * Once CONNECTED, they cycle through RDMA_READ_ADV, RDMA_WRITE_ADV,
135 * and RDMA_WRITE_COMPLETE for each ping.
/*
 * Wire format exchanged with the peer: buffer address, rkey and size
 * (fields not visible in this view), sent in network byte order via
 * htonll/htonl.
 */
150 struct krping_rdma_info {
157 * Default max buffer size for IO...
159 #define RPING_BUFSIZE 128*1024
160 #define RPING_SQ_DEPTH 64
163 * Control block struct.
167 int server; /* 0 iff client */
173 struct ib_mr *dma_mr; /* global MR (DMA memory mode) */
175 struct ib_fast_reg_page_list *page_list; /* page list for fastreg */
177 struct ib_send_wr fastreg_wr; /* pre-built FAST_REG_MR WR */
178 struct ib_send_wr invalidate_wr; /* pre-built LOCAL_INV WR */
179 struct ib_mr *fastreg_mr; /* MR rebound each iteration (FASTREG) */
180 int server_invalidate; /* server replies with SEND_WITH_INV */
185 struct ib_mw_bind bind_attr; /* bind attributes (MW memory mode) */
187 struct ib_recv_wr rq_wr; /* recv work request record */
188 struct ib_sge recv_sgl; /* recv single SGE */
189 struct krping_rdma_info recv_buf;/* malloc'd buffer */
191 DECLARE_PCI_UNMAP_ADDR(recv_mapping)
192 struct ib_mr *recv_mr; /* MR for recv_buf (non-DMA modes) */
194 struct ib_send_wr sq_wr; /* send work request record */
195 struct ib_sge send_sgl; /* send single SGE */
196 struct krping_rdma_info send_buf;/* single send buf */
198 DECLARE_PCI_UNMAP_ADDR(send_mapping)
199 struct ib_mr *send_mr; /* MR for send_buf (non-DMA modes) */
201 struct ib_send_wr rdma_sq_wr; /* rdma work request record */
202 struct ib_sge rdma_sgl; /* rdma single SGE */
203 char *rdma_buf; /* used as rdma sink */
205 DECLARE_PCI_UNMAP_ADDR(rdma_mapping)
206 struct ib_mr *rdma_mr; /* MR for rdma_buf */
208 uint32_t remote_rkey; /* remote guys RKEY */
209 uint64_t remote_addr; /* remote guys TO */
210 uint32_t remote_len; /* remote guys LEN */
212 char *start_buf; /* rdma read src */
214 DECLARE_PCI_UNMAP_ADDR(start_mapping)
215 struct ib_mr *start_mr; /* MR for start_buf */
217 enum test_state state; /* used for cond/signalling */
218 wait_queue_head_t sem; /* threads sleep here; woken by handlers */
219 struct krping_stats stats; /* byte/message counters */
221 uint16_t port; /* dst port in NBO */
222 struct in_addr addr; /* dst addr in NBO */
223 char *addr_str; /* dst addr string */
224 int verbose; /* verbose logging */
225 int count; /* ping count */
226 int size; /* ping data size */
227 int validate; /* validate ping data */
228 int wlat; /* run wlat test */
229 int rlat; /* run rlat test */
230 int bw; /* run bw test */
231 int duplex; /* run bw full duplex test */
232 int poll; /* poll or block for rlat test */
233 int txdepth; /* SQ depth */
234 int local_dma_lkey; /* use 0 for lkey */
235 int frtest; /* fastreg test */
238 struct rdma_cm_id *cm_id; /* connection on client side,*/
239 /* listener on server side. */
240 struct rdma_cm_id *child_cm_id; /* connection on server side */
241 struct list_head list; /* linkage on the global krping_cbs list */
/*
 * RDMA CM event callback (runs in the CM's context, not the krping
 * thread): record each connection-state transition in cb->state and
 * wake the thread sleeping on cb->sem so it can react.
 */
244 static int krping_cma_event_handler(struct rdma_cm_id *cma_id,
245 struct rdma_cm_event *event)
248 struct krping_cb *cb = cma_id->context;
250 DEBUG_LOG(cb, "cma_event type %d cma_id %p (%s)\n", event->event,
251 cma_id, (cma_id == cb->cm_id) ? "parent" : "child");
253 switch (event->event) {
254 case RDMA_CM_EVENT_ADDR_RESOLVED:
255 cb->state = ADDR_RESOLVED;
/* Address resolved; immediately kick off route resolution (2s timeout). */
256 ret = rdma_resolve_route(cma_id, 2000);
258 PRINTF(cb, "rdma_resolve_route error %d\n", ret);
259 wake_up_interruptible(&cb->sem);
263 case RDMA_CM_EVENT_ROUTE_RESOLVED:
264 cb->state = ROUTE_RESOLVED;
265 wake_up_interruptible(&cb->sem);
268 case RDMA_CM_EVENT_CONNECT_REQUEST:
269 cb->state = CONNECT_REQUEST;
/* Server side: remember the child id for the incoming connection. */
270 cb->child_cm_id = cma_id;
271 DEBUG_LOG(cb, "child cma %p\n", cb->child_cm_id);
272 wake_up_interruptible(&cb->sem);
275 case RDMA_CM_EVENT_ESTABLISHED:
276 DEBUG_LOG(cb, "ESTABLISHED\n");
278 cb->state = CONNECTED;
280 wake_up_interruptible(&cb->sem);
/* All fatal connection-setup errors funnel into the same wakeup. */
283 case RDMA_CM_EVENT_ADDR_ERROR:
284 case RDMA_CM_EVENT_ROUTE_ERROR:
285 case RDMA_CM_EVENT_CONNECT_ERROR:
286 case RDMA_CM_EVENT_UNREACHABLE:
287 case RDMA_CM_EVENT_REJECTED:
288 PRINTF(cb, "cma event %d, error %d\n", event->event,
291 wake_up_interruptible(&cb->sem);
294 case RDMA_CM_EVENT_DISCONNECTED:
295 PRINTF(cb, "DISCONNECT EVENT...\n");
297 wake_up_interruptible(&cb->sem);
300 case RDMA_CM_EVENT_DEVICE_REMOVAL:
301 PRINTF(cb, "cma detected device removal!!!!\n");
305 PRINTF(cb, "oof bad type!\n");
306 wake_up_interruptible(&cb->sem);
/*
 * Server-side receive handler: validate the message length, stash the
 * client's advertised rkey/addr/len (converted from network byte
 * order), and advance the state machine to RDMA_READ_ADV or
 * RDMA_WRITE_ADV depending on where we are in the ping/pong cycle.
 */
312 static int server_recv(struct krping_cb *cb, struct ib_wc *wc)
314 if (wc->byte_len != sizeof(cb->recv_buf)) {
315 PRINTF(cb, "Received bogus data, size %d\n",
320 cb->remote_rkey = ntohl(cb->recv_buf.rkey);
321 cb->remote_addr = ntohll(cb->recv_buf.buf);
322 cb->remote_len = ntohl(cb->recv_buf.size);
323 DEBUG_LOG(cb, "Received rkey %x addr %llx len %d from peer\n",
324 cb->remote_rkey, (unsigned long long)cb->remote_addr,
/* First adv of a cycle -> read phase; second -> write phase. */
327 if (cb->state <= CONNECTED || cb->state == RDMA_WRITE_COMPLETE)
328 cb->state = RDMA_READ_ADV;
330 cb->state = RDMA_WRITE_ADV;
/*
 * Client-side receive handler: the server's "go ahead" messages carry
 * no payload we parse; just validate the length and step the state
 * machine from READ_ADV to WRITE_ADV, then to WRITE_COMPLETE.
 */
335 static int client_recv(struct krping_cb *cb, struct ib_wc *wc)
337 if (wc->byte_len != sizeof(cb->recv_buf)) {
338 PRINTF(cb, "Received bogus data, size %d\n",
343 if (cb->state == RDMA_READ_ADV)
344 cb->state = RDMA_WRITE_ADV;
346 cb->state = RDMA_WRITE_COMPLETE;
/*
 * Completion-queue callback: drain the CQ, update stats, and advance
 * the test state machine.  The wlat/rlat/bw tests poll the CQ
 * themselves, so notifications are only re-armed for the default
 * ping/pong mode.
 */
351 static void krping_cq_event_handler(struct ib_cq *cq, void *ctx)
353 struct krping_cb *cb = ctx;
355 struct ib_recv_wr *bad_wr;
358 BUG_ON(cb->cq != cq);
359 if (cb->state == ERROR) {
360 PRINTF(cb, "cq completion in ERROR state\n");
364 PRINTF(cb, "cq completion event in frtest!\n");
/* Re-arm notifications only in the default (non-polling) mode. */
367 if (!cb->wlat && !cb->rlat && !cb->bw)
368 ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
369 while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 1) {
/* Flush errors are expected during teardown; anything else is fatal. */
371 if (wc.status == IB_WC_WR_FLUSH_ERR) {
372 DEBUG_LOG(cb, "cq flushed\n");
375 PRINTF(cb, "cq completion failed with "
376 "wr_id %Lx status %d opcode %d vender_err %x\n",
377 wc.wr_id, wc.status, wc.opcode, wc.vendor_err);
384 DEBUG_LOG(cb, "send completion\n");
385 cb->stats.send_bytes += cb->send_sgl.length;
386 cb->stats.send_msgs++;
389 case IB_WC_RDMA_WRITE:
390 DEBUG_LOG(cb, "rdma write completion\n");
391 cb->stats.write_bytes += cb->rdma_sq_wr.sg_list->length;
392 cb->stats.write_msgs++;
393 cb->state = RDMA_WRITE_COMPLETE;
394 wake_up_interruptible(&cb->sem);
397 case IB_WC_RDMA_READ:
398 DEBUG_LOG(cb, "rdma read completion\n");
399 cb->stats.read_bytes += cb->rdma_sq_wr.sg_list->length;
400 cb->stats.read_msgs++;
401 cb->state = RDMA_READ_COMPLETE;
402 wake_up_interruptible(&cb->sem);
406 DEBUG_LOG(cb, "recv completion\n");
407 cb->stats.recv_bytes += sizeof(cb->recv_buf);
408 cb->stats.recv_msgs++;
/* The lat/bw tests always use the server-side adv parser. */
409 if (cb->wlat || cb->rlat || cb->bw)
410 ret = server_recv(cb, &wc);
412 ret = cb->server ? server_recv(cb, &wc) :
413 client_recv(cb, &wc);
415 PRINTF(cb, "recv wc error: %d\n", ret);
/* Replenish the single outstanding receive for the next message. */
419 ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
421 PRINTF(cb, "post recv error: %d\n",
425 wake_up_interruptible(&cb->sem);
430 "%s:%d Unexpected opcode %d, Shutting down\n",
431 __func__, __LINE__, wc.opcode);
436 PRINTF(cb, "poll error %d\n", ret);
442 wake_up_interruptible(&cb->sem);
/*
 * Accept the incoming connection on the child cm_id and, except in the
 * CQ-polling lat/bw modes, sleep until the CM handler reports
 * CONNECTED (or ERROR).
 */
445 static int krping_accept(struct krping_cb *cb)
447 struct rdma_conn_param conn_param;
450 DEBUG_LOG(cb, "accepting client connection request\n");
452 memset(&conn_param, 0, sizeof conn_param);
453 conn_param.responder_resources = 1;
454 conn_param.initiator_depth = 1;
456 ret = rdma_accept(cb->child_cm_id, &conn_param);
458 PRINTF(cb, "rdma_accept error: %d\n", ret);
462 if (!cb->wlat && !cb->rlat && !cb->bw) {
463 wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
464 if (cb->state == ERROR) {
465 PRINTF(cb, "wait for CONNECTED state %d\n",
/*
 * Pre-build the recv, send, rdma and (FASTREG/MW) registration work
 * requests so the data path only tweaks a few fields per iteration.
 * The lkey source depends on memory mode: the device's reserved
 * local_dma_lkey, the global DMA MR, or a dedicated per-buffer MR.
 */
473 static void krping_setup_wr(struct krping_cb *cb)
475 cb->recv_sgl.addr = cb->recv_dma_addr;
476 cb->recv_sgl.length = sizeof cb->recv_buf;
477 if (cb->local_dma_lkey)
478 cb->recv_sgl.lkey = cb->qp->device->local_dma_lkey;
479 else if (cb->mem == DMA)
480 cb->recv_sgl.lkey = cb->dma_mr->lkey;
482 cb->recv_sgl.lkey = cb->recv_mr->lkey;
483 cb->rq_wr.sg_list = &cb->recv_sgl;
484 cb->rq_wr.num_sge = 1;
486 cb->send_sgl.addr = cb->send_dma_addr;
487 cb->send_sgl.length = sizeof cb->send_buf;
488 if (cb->local_dma_lkey)
489 cb->send_sgl.lkey = cb->qp->device->local_dma_lkey;
490 else if (cb->mem == DMA)
491 cb->send_sgl.lkey = cb->dma_mr->lkey;
493 cb->send_sgl.lkey = cb->send_mr->lkey;
495 cb->sq_wr.opcode = IB_WR_SEND;
496 cb->sq_wr.send_flags = IB_SEND_SIGNALED;
497 cb->sq_wr.sg_list = &cb->send_sgl;
498 cb->sq_wr.num_sge = 1;
/* Only the side that issues RDMA ops needs the rdma WR pre-built. */
500 if (cb->server || cb->wlat || cb->rlat || cb->bw) {
501 cb->rdma_sgl.addr = cb->rdma_dma_addr;
503 cb->rdma_sgl.lkey = cb->rdma_mr->lkey;
504 cb->rdma_sq_wr.send_flags = IB_SEND_SIGNALED;
505 cb->rdma_sq_wr.sg_list = &cb->rdma_sgl;
506 cb->rdma_sq_wr.num_sge = 1;
513 * A chain of 2 WRs, INVALIDATE_MR + FAST_REG_MR.
514 * both unsignaled. The client uses them to reregister
515 * the rdma buffers with a new key each iteration.
517 cb->fastreg_wr.opcode = IB_WR_FAST_REG_MR;
518 cb->fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
519 cb->fastreg_wr.wr.fast_reg.length = cb->size;
520 cb->fastreg_wr.wr.fast_reg.page_list = cb->page_list;
521 cb->fastreg_wr.wr.fast_reg.page_list_len = cb->page_list_len;
523 cb->invalidate_wr.next = &cb->fastreg_wr;
524 cb->invalidate_wr.opcode = IB_WR_LOCAL_INV;
/* MW mode: pre-fill the bind attributes (bind is unsignaled). */
527 cb->bind_attr.wr_id = 0xabbaabba;
528 cb->bind_attr.send_flags = 0; /* unsignaled */
529 cb->bind_attr.length = cb->size;
/*
 * Allocate and register all data buffers for the selected memory mode:
 * always DMA-map the send/recv info structs; then create a global DMA
 * MR, per-buffer phys MRs, a FASTREG MR + page list, or an MW as the
 * mode requires; and, for clients and the wlat/rlat/bw tests, a start
 * (source) buffer.  On failure the unwind path releases everything
 * that was successfully created (guarded by IS_ERR/NULL checks).
 *
 * Fix: the fastreg page-list, fastreg MR and MW failure messages were
 * copy-pasted from the recv_buf path and misreported which call failed.
 */
536 static int krping_setup_buffers(struct krping_cb *cb)
539 struct ib_phys_buf buf;
542 DEBUG_LOG(cb, "krping_setup_buffers called on cb %p\n", cb);
544 cb->recv_dma_addr = dma_map_single(cb->pd->device->dma_device,
546 sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
547 pci_unmap_addr_set(cb, recv_mapping, cb->recv_dma_addr);
548 cb->send_dma_addr = dma_map_single(cb->pd->device->dma_device,
549 &cb->send_buf, sizeof(cb->send_buf),
551 pci_unmap_addr_set(cb, send_mapping, cb->send_dma_addr);
553 if (cb->mem == DMA) {
554 cb->dma_mr = ib_get_dma_mr(cb->pd, IB_ACCESS_LOCAL_WRITE|
555 IB_ACCESS_REMOTE_READ|
556 IB_ACCESS_REMOTE_WRITE);
557 if (IS_ERR(cb->dma_mr)) {
558 DEBUG_LOG(cb, "reg_dmamr failed\n");
559 ret = PTR_ERR(cb->dma_mr);
/* Without the device's local_dma_lkey, register MRs for the info bufs. */
563 if (!cb->local_dma_lkey) {
564 buf.addr = cb->recv_dma_addr;
565 buf.size = sizeof cb->recv_buf;
566 DEBUG_LOG(cb, "recv buf dma_addr %llx size %d\n", buf.addr,
568 iovbase = cb->recv_dma_addr;
569 cb->recv_mr = ib_reg_phys_mr(cb->pd, &buf, 1,
570 IB_ACCESS_LOCAL_WRITE,
573 if (IS_ERR(cb->recv_mr)) {
574 DEBUG_LOG(cb, "recv_buf reg_mr failed\n");
575 ret = PTR_ERR(cb->recv_mr);
579 buf.addr = cb->send_dma_addr;
580 buf.size = sizeof cb->send_buf;
581 DEBUG_LOG(cb, "send buf dma_addr %llx size %d\n", buf.addr,
583 iovbase = cb->send_dma_addr;
584 cb->send_mr = ib_reg_phys_mr(cb->pd, &buf, 1,
587 if (IS_ERR(cb->send_mr)) {
588 DEBUG_LOG(cb, "send_buf reg_mr failed\n");
589 ret = PTR_ERR(cb->send_mr);
595 cb->rdma_buf = kmalloc(cb->size, GFP_KERNEL);
597 DEBUG_LOG(cb, "rdma_buf malloc failed\n");
602 cb->rdma_dma_addr = dma_map_single(cb->pd->device->dma_device,
603 cb->rdma_buf, cb->size,
605 pci_unmap_addr_set(cb, rdma_mapping, cb->rdma_dma_addr);
606 if (cb->mem != DMA) {
/* Round cb->size up to a whole number of pages for the page list. */
609 cb->page_list_len = (((cb->size - 1) & PAGE_MASK) +
610 PAGE_SIZE) >> PAGE_SHIFT;
611 cb->page_list = ib_alloc_fast_reg_page_list(
614 if (IS_ERR(cb->page_list)) {
615 DEBUG_LOG(cb, "alloc_fast_reg_page_list failed\n");
616 ret = PTR_ERR(cb->page_list);
619 cb->fastreg_mr = ib_alloc_fast_reg_mr(cb->pd,
620 cb->page_list->max_page_list_len);
621 if (IS_ERR(cb->fastreg_mr)) {
622 DEBUG_LOG(cb, "alloc_fast_reg_mr failed\n");
623 ret = PTR_ERR(cb->fastreg_mr);
626 DEBUG_LOG(cb, "fastreg rkey 0x%x page_list %p"
627 " page_list_len %u\n", cb->fastreg_mr->rkey,
628 cb->page_list, cb->page_list_len);
631 cb->mw = ib_alloc_mw(cb->pd);
632 if (IS_ERR(cb->mw)) {
633 DEBUG_LOG(cb, "alloc_mw failed\n");
634 ret = PTR_ERR(cb->mw);
637 DEBUG_LOG(cb, "mw rkey 0x%x\n", cb->mw->rkey);
640 buf.addr = cb->rdma_dma_addr;
642 iovbase = cb->rdma_dma_addr;
643 cb->rdma_mr = ib_reg_phys_mr(cb->pd, &buf, 1,
644 IB_ACCESS_REMOTE_READ|
645 IB_ACCESS_REMOTE_WRITE,
647 if (IS_ERR(cb->rdma_mr)) {
648 DEBUG_LOG(cb, "rdma_buf reg_mr failed\n");
649 ret = PTR_ERR(cb->rdma_mr);
652 DEBUG_LOG(cb, "rdma buf dma_addr %llx size %d mr rkey 0x%x\n",
653 buf.addr, (int)buf.size, cb->rdma_mr->rkey);
/* Clients (and the lat/bw tests on either side) need a source buffer. */
662 if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
664 cb->start_buf = kmalloc(cb->size, GFP_KERNEL);
665 if (!cb->start_buf) {
666 DEBUG_LOG(cb, "start_buf malloc failed\n");
671 cb->start_dma_addr = dma_map_single(cb->pd->device->dma_device,
672 cb->start_buf, cb->size,
674 pci_unmap_addr_set(cb, start_mapping, cb->start_dma_addr);
676 if (cb->mem == MR || cb->mem == MW) {
677 unsigned flags = IB_ACCESS_REMOTE_READ;
/* The lat/bw tests also write into the peer's start buffer. */
679 if (cb->wlat || cb->rlat || cb->bw)
680 flags |= IB_ACCESS_REMOTE_WRITE;
682 buf.addr = cb->start_dma_addr;
684 DEBUG_LOG(cb, "start buf dma_addr %llx size %d\n",
685 buf.addr, (int)buf.size);
686 iovbase = cb->start_dma_addr;
687 cb->start_mr = ib_reg_phys_mr(cb->pd, &buf, 1,
691 if (IS_ERR(cb->start_mr)) {
692 DEBUG_LOG(cb, "start_buf reg_mr failed\n");
693 ret = PTR_ERR(cb->start_mr);
700 DEBUG_LOG(cb, "allocated & registered buffers...\n");
/* Error unwind: release everything that was successfully created. */
703 if (cb->fastreg_mr && !IS_ERR(cb->fastreg_mr))
704 ib_dereg_mr(cb->fastreg_mr);
705 if (cb->mw && !IS_ERR(cb->mw))
706 ib_dealloc_mw(cb->mw);
707 if (cb->rdma_mr && !IS_ERR(cb->rdma_mr))
708 ib_dereg_mr(cb->rdma_mr);
709 if (cb->page_list && !IS_ERR(cb->page_list))
710 ib_free_fast_reg_page_list(cb->page_list);
711 if (cb->dma_mr && !IS_ERR(cb->dma_mr))
712 ib_dereg_mr(cb->dma_mr);
713 if (cb->recv_mr && !IS_ERR(cb->recv_mr))
714 ib_dereg_mr(cb->recv_mr);
715 if (cb->send_mr && !IS_ERR(cb->send_mr))
716 ib_dereg_mr(cb->send_mr);
720 kfree(cb->start_buf);
/*
 * Undo krping_setup_buffers(): deregister the MRs/MW, unmap the DMA
 * mappings for the info/rdma/start buffers, and free the kmalloc'd
 * start buffer.
 */
724 static void krping_free_buffers(struct krping_cb *cb)
726 DEBUG_LOG(cb, "krping_free_buffers called on cb %p\n", cb);
729 ib_dereg_mr(cb->dma_mr);
731 ib_dereg_mr(cb->send_mr);
733 ib_dereg_mr(cb->recv_mr);
735 ib_dereg_mr(cb->rdma_mr);
737 ib_dereg_mr(cb->start_mr);
739 ib_dereg_mr(cb->fastreg_mr);
741 ib_dealloc_mw(cb->mw);
743 dma_unmap_single(cb->pd->device->dma_device,
744 pci_unmap_addr(cb, recv_mapping),
745 sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
746 dma_unmap_single(cb->pd->device->dma_device,
747 pci_unmap_addr(cb, send_mapping),
748 sizeof(cb->send_buf), DMA_BIDIRECTIONAL);
749 dma_unmap_single(cb->pd->device->dma_device,
750 pci_unmap_addr(cb, rdma_mapping),
751 cb->size, DMA_BIDIRECTIONAL);
754 dma_unmap_single(cb->pd->device->dma_device,
755 pci_unmap_addr(cb, start_mapping),
756 cb->size, DMA_BIDIRECTIONAL);
757 kfree(cb->start_buf);
/*
 * Create an RC QP on the appropriate cm_id: the server uses the child
 * id from the accepted connection, the client its own id.  Send and
 * recv share the single CQ; only 2 recv WRs are ever outstanding.
 */
761 static int krping_create_qp(struct krping_cb *cb)
763 struct ib_qp_init_attr init_attr;
766 memset(&init_attr, 0, sizeof(init_attr));
767 init_attr.cap.max_send_wr = cb->txdepth;
768 init_attr.cap.max_recv_wr = 2;
769 init_attr.cap.max_recv_sge = 1;
770 init_attr.cap.max_send_sge = 1;
771 init_attr.qp_type = IB_QPT_RC;
772 init_attr.send_cq = cb->cq;
773 init_attr.recv_cq = cb->cq;
/* Only WRs that request it are signaled (IB_SEND_SIGNALED). */
774 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
777 ret = rdma_create_qp(cb->child_cm_id, cb->pd, &init_attr);
779 cb->qp = cb->child_cm_id->qp;
781 ret = rdma_create_qp(cb->cm_id, cb->pd, &init_attr);
783 cb->qp = cb->cm_id->qp;
/* Tear down the QP, CQ and PD created by krping_setup_qp(). */
789 static void krping_free_qp(struct krping_cb *cb)
791 ib_destroy_qp(cb->qp);
792 ib_destroy_cq(cb->cq);
793 ib_dealloc_pd(cb->pd);
/*
 * Allocate the PD, CQ and QP for a connection.  CQ notifications are
 * armed up front only for the default ping/pong mode; the lat/bw and
 * fastreg tests poll the CQ directly.
 *
 * Fix: the error message after ib_req_notify_cq() was a copy/paste of
 * the ib_create_cq message and misreported which call failed.
 */
796 static int krping_setup_qp(struct krping_cb *cb, struct rdma_cm_id *cm_id)
799 cb->pd = ib_alloc_pd(cm_id->device);
800 if (IS_ERR(cb->pd)) {
801 PRINTF(cb, "ib_alloc_pd failed\n");
802 return PTR_ERR(cb->pd);
804 DEBUG_LOG(cb, "created pd %p\n", cb->pd);
806 strlcpy(cb->stats.name, cb->pd->device->name, sizeof(cb->stats.name));
808 cb->cq = ib_create_cq(cm_id->device, krping_cq_event_handler, NULL,
809 cb, cb->txdepth * 2, 0);
810 if (IS_ERR(cb->cq)) {
811 PRINTF(cb, "ib_create_cq failed\n");
812 ret = PTR_ERR(cb->cq);
815 DEBUG_LOG(cb, "created cq %p\n", cb->cq);
817 if (!cb->wlat && !cb->rlat && !cb->bw && !cb->frtest) {
818 ret = ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
820 PRINTF(cb, "ib_req_notify_cq failed\n");
825 ret = krping_create_qp(cb);
827 PRINTF(cb, "krping_create_qp failed: %d\n", ret);
830 DEBUG_LOG(cb, "created qp %p\n", cb->qp);
/* Error unwind. */
833 ib_destroy_cq(cb->cq);
835 ib_dealloc_pd(cb->pd);
840 * return the (possibly rebound) rkey for the rdma buffer.
841 * FASTREG mode: invalidate and rebind via fastreg wr.
842 * MW mode: rebind the MW.
843 * other modes: just return the mr rkey.
845 static u32 krping_rdma_rkey(struct krping_cb *cb, u64 buf, int post_inv)
847 u32 rkey = 0xffffffff;
849 struct ib_send_wr *bad_wr;
/* FASTREG: invalidate the old rkey (optionally via a posted LOCAL_INV). */
855 cb->invalidate_wr.ex.invalidate_rkey = cb->fastreg_mr->rkey;
858 * Update the fastreg key.
860 ib_update_fast_reg_key(cb->fastreg_mr, ++cb->key);
861 cb->fastreg_wr.wr.fast_reg.rkey = cb->fastreg_mr->rkey;
864 * Update the fastreg WR with new buf info.
/* start buffer is the remote-read source; the rdma buffer is the sink. */
866 if (buf == (u64)cb->start_dma_addr)
867 cb->fastreg_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_READ;
869 cb->fastreg_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
870 cb->fastreg_wr.wr.fast_reg.iova_start = buf;
871 p = (u64)(buf & PAGE_MASK);
872 for (i=0; i < cb->fastreg_wr.wr.fast_reg.page_list_len;
873 i++, p += PAGE_SIZE) {
874 cb->page_list->page_list[i] = p;
875 DEBUG_LOG(cb, "page_list[%d] 0x%llx\n", i, p);
878 DEBUG_LOG(cb, "post_inv = %d, fastreg new rkey 0x%x shift %u len %u"
879 " iova_start %llx page_list_len %u\n",
881 cb->fastreg_wr.wr.fast_reg.rkey,
882 cb->fastreg_wr.wr.fast_reg.page_shift,
883 cb->fastreg_wr.wr.fast_reg.length,
884 cb->fastreg_wr.wr.fast_reg.iova_start,
885 cb->fastreg_wr.wr.fast_reg.page_list_len);
/* Post the LOCAL_INV+FAST_REG chain, or the FAST_REG WR alone. */
888 ret = ib_post_send(cb->qp, &cb->invalidate_wr, &bad_wr);
890 ret = ib_post_send(cb->qp, &cb->fastreg_wr, &bad_wr);
892 PRINTF(cb, "post send error %d\n", ret);
895 rkey = cb->fastreg_mr->rkey;
899 * Update the MW with new buf info.
901 if (buf == (u64)cb->start_dma_addr) {
902 cb->bind_attr.mw_access_flags = IB_ACCESS_REMOTE_READ;
903 cb->bind_attr.mr = cb->start_mr;
905 cb->bind_attr.mw_access_flags = IB_ACCESS_REMOTE_WRITE;
906 cb->bind_attr.mr = cb->rdma_mr;
908 cb->bind_attr.addr = buf;
909 DEBUG_LOG(cb, "binding mw rkey 0x%x to buf %llx mr rkey 0x%x\n",
910 cb->mw->rkey, buf, cb->bind_attr.mr->rkey);
911 ret = ib_bind_mw(cb->qp, cb->mw, &cb->bind_attr);
913 PRINTF(cb, "bind mw error %d\n", ret);
/* MR/DMA modes: the rkey is static, no rebind needed. */
919 if (buf == (u64)cb->start_dma_addr)
920 rkey = cb->start_mr->rkey;
922 rkey = cb->rdma_mr->rkey;
925 rkey = cb->dma_mr->rkey;
928 PRINTF(cb, "%s:%d case ERROR\n", __func__, __LINE__);
/*
 * Fill cb->send_buf with the buffer advertisement (addr, rkey, size) in
 * network byte order, rebinding the rkey first via krping_rdma_rkey().
 */
935 static void krping_format_send(struct krping_cb *cb, u64 buf)
937 struct krping_rdma_info *info = &cb->send_buf;
941 * Client side will do fastreg or mw bind before
942 * advertising the rdma buffer. Server side
943 * sends have no data.
945 if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
946 rkey = krping_rdma_rkey(cb, buf, !cb->server_invalidate);
947 info->buf = htonll(buf);
948 info->rkey = htonl(rkey);
949 info->size = htonl(cb->size);
950 DEBUG_LOG(cb, "RDMA addr %llx rkey %x len %d\n",
951 (unsigned long long)buf, rkey, cb->size);
/*
 * Server side of the default ping/pong loop: wait for the client's
 * source advertisement, RDMA-read the ping data, tell the client to
 * continue, wait for the sink advertisement, RDMA-write the data back,
 * and signal the client to start the next cycle.
 */
955 static void krping_test_server(struct krping_cb *cb)
957 struct ib_send_wr *bad_wr, inv;
961 /* Wait for client's Start STAG/TO/Len */
962 wait_event_interruptible(cb->sem, cb->state >= RDMA_READ_ADV);
963 if (cb->state != RDMA_READ_ADV) {
964 PRINTF(cb, "wait for RDMA_READ_ADV state %d\n",
969 DEBUG_LOG(cb, "server received sink adv\n");
971 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
972 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
973 cb->rdma_sq_wr.sg_list->length = cb->remote_len;
974 cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, 1);
975 cb->rdma_sq_wr.next = NULL;
977 /* Issue RDMA Read. */
979 cb->rdma_sq_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
982 cb->rdma_sq_wr.opcode = IB_WR_RDMA_READ;
983 if (cb->mem == FASTREG) {
985 * Immediately follow the read with a
988 cb->rdma_sq_wr.next = &inv;
989 memset(&inv, 0, sizeof inv);
990 inv.opcode = IB_WR_LOCAL_INV;
991 inv.ex.invalidate_rkey = cb->fastreg_mr->rkey;
/* Fence so the invalidate cannot pass the read. */
992 inv.send_flags = IB_SEND_FENCE;
996 ret = ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr);
998 PRINTF(cb, "post send error %d\n", ret);
/* Unlink the stack-allocated inv WR before it goes out of scope. */
1001 cb->rdma_sq_wr.next = NULL;
1003 DEBUG_LOG(cb, "server posted rdma read req \n");
1005 /* Wait for read completion */
1006 wait_event_interruptible(cb->sem,
1007 cb->state >= RDMA_READ_COMPLETE);
1008 if (cb->state != RDMA_READ_COMPLETE) {
1010 "wait for RDMA_READ_COMPLETE state %d\n",
1014 DEBUG_LOG(cb, "server received read complete\n");
1016 /* Display data in recv buf */
1018 PRINTF(cb, "server ping data: %s\n",
1021 /* Tell client to continue */
1022 if (cb->server && cb->server_invalidate) {
1023 cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
1024 cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
1025 DEBUG_LOG(cb, "send-w-inv rkey 0x%x\n", cb->remote_rkey);
1027 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1029 PRINTF(cb, "post send error %d\n", ret);
1032 DEBUG_LOG(cb, "server posted go ahead\n");
1034 /* Wait for client's RDMA STAG/TO/Len */
1035 wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
1036 if (cb->state != RDMA_WRITE_ADV) {
1038 "wait for RDMA_WRITE_ADV state %d\n",
1042 DEBUG_LOG(cb, "server received sink adv\n");
1044 /* RDMA Write echo data */
1045 cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
1046 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
1047 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
/* Echo only the NUL-terminated ping string, not the whole buffer. */
1048 cb->rdma_sq_wr.sg_list->length = strlen(cb->rdma_buf) + 1;
1049 if (cb->local_dma_lkey)
1050 cb->rdma_sgl.lkey = cb->qp->device->local_dma_lkey;
1052 cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, 0);
1054 DEBUG_LOG(cb, "rdma write from lkey %x laddr %llx len %d\n",
1055 cb->rdma_sq_wr.sg_list->lkey,
1056 (unsigned long long)cb->rdma_sq_wr.sg_list->addr,
1057 cb->rdma_sq_wr.sg_list->length);
1059 ret = ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr);
1061 PRINTF(cb, "post send error %d\n", ret);
1065 /* Wait for completion */
1066 ret = wait_event_interruptible(cb->sem, cb->state >=
1067 RDMA_WRITE_COMPLETE);
1068 if (cb->state != RDMA_WRITE_COMPLETE) {
1070 "wait for RDMA_WRITE_COMPLETE state %d\n",
1074 DEBUG_LOG(cb, "server rdma write complete \n");
/* Reset for the next ping cycle. */
1076 cb->state = CONNECTED;
1078 /* Tell client to begin again */
1079 if (cb->server && cb->server_invalidate) {
1080 cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
1081 cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
1082 DEBUG_LOG(cb, "send-w-inv rkey 0x%x\n", cb->remote_rkey);
1084 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1086 PRINTF(cb, "post send error %d\n", ret);
1089 DEBUG_LOG(cb, "server posted go ahead\n");
/*
 * Read-latency test: post one RDMA read at a time for cb->count
 * iterations and report the wall-clock delta.  cb->poll selects
 * busy-polling the CQ instead of sleeping on cb->sem.
 */
1093 static void rlat_test(struct krping_cb *cb)
1096 int iters = cb->count;
1097 struct timeval start_tv, stop_tv;
1100 struct ib_send_wr *bad_wr;
1104 cb->rdma_sq_wr.opcode = IB_WR_RDMA_READ;
1105 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
1106 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
1107 cb->rdma_sq_wr.sg_list->length = cb->size;
1109 microtime(&start_tv);
1111 cb->state = RDMA_READ_ADV;
1112 ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
1114 while (scnt < iters) {
1116 cb->state = RDMA_READ_ADV;
1117 ret = ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr);
1120 "Couldn't post send: ret=%d scnt %d\n",
/* Blocking mode: sleep until the CQ handler advances the state. */
1127 wait_event_interruptible(cb->sem,
1128 cb->state != RDMA_READ_ADV);
1129 if (cb->state == RDMA_READ_COMPLETE) {
1131 ib_req_notify_cq(cb->cq,
/* Polling mode: spin on the CQ ourselves. */
1137 ne = ib_poll_cq(cb->cq, 1, &wc);
1138 if (cb->state == ERROR) {
1140 "state == ERROR...bailing scnt %d\n",
1147 PRINTF(cb, "poll CQ failed %d\n", ne);
1150 if (cb->poll && wc.status != IB_WC_SUCCESS) {
1151 PRINTF(cb, "Completion wth error at %s:\n",
1152 cb->server ? "server" : "client");
1153 PRINTF(cb, "Failed status %d: wr_id %d\n",
1154 wc.status, (int) wc.wr_id);
1159 microtime(&stop_tv);
/* Normalize the timeval difference before printing. */
1161 if (stop_tv.tv_usec < start_tv.tv_usec) {
1162 stop_tv.tv_usec += 1000000;
1163 stop_tv.tv_sec -= 1;
1166 PRINTF(cb, "delta sec %lu delta usec %lu iter %d size %d\n",
1167 stop_tv.tv_sec - start_tv.tv_sec,
1168 stop_tv.tv_usec - start_tv.tv_usec,
/*
 * Write-latency test: ping-pong a one-byte counter via RDMA writes.
 * Each side busy-polls its receive buffer (poll_buf) for the peer's
 * counter byte, then posts a write carrying its own.  Per-iteration
 * rdtsc cycle counts for post and poll are summed over the first
 * cycle_iters iterations and reported with the wall-clock delta.
 */
1172 static void wlat_test(struct krping_cb *cb)
1174 int ccnt, scnt, rcnt;
1175 int iters=cb->count;
/* volatile: the peer's RDMA write updates this buffer behind our back. */
1176 volatile char *poll_buf = (char *) cb->start_buf;
1177 char *buf = (char *)cb->rdma_buf;
1178 struct timeval start_tv, stop_tv;
1179 cycles_t *post_cycles_start, *post_cycles_stop;
1180 cycles_t *poll_cycles_start, *poll_cycles_stop;
1181 cycles_t *last_poll_cycles_start;
1182 cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
1184 int cycle_iters = 1000;
1190 post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1191 if (!post_cycles_start) {
1192 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1195 post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1196 if (!post_cycles_stop) {
1197 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1200 poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1201 if (!poll_cycles_start) {
1202 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1205 poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1206 if (!poll_cycles_stop) {
1207 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1210 last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
1212 if (!last_poll_cycles_start) {
1213 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1216 cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
1217 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
1218 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
1219 cb->rdma_sq_wr.sg_list->length = cb->size;
1221 if (cycle_iters > iters)
1222 cycle_iters = iters;
1223 microtime(&start_tv);
1224 while (scnt < iters || ccnt < iters || rcnt < iters) {
1226 /* Wait till buffer changes. */
/* The client sends first; the server waits for the peer's byte. */
1227 if (rcnt < iters && !(scnt < 1 && !cb->server)) {
1229 while (*poll_buf != (char)rcnt) {
1230 if (cb->state == ERROR) {
1232 "state = ERROR, bailing\n");
1239 struct ib_send_wr *bad_wr;
/* Carry our iteration counter in the first byte of the write. */
1241 *buf = (char)scnt+1;
1242 if (scnt < cycle_iters)
1243 post_cycles_start[scnt] = get_cycles();
1244 if (ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr)) {
1246 "Couldn't post send: scnt=%d\n",
1250 if (scnt < cycle_iters)
1251 post_cycles_stop[scnt] = get_cycles();
1259 if (ccnt < cycle_iters)
1260 poll_cycles_start[ccnt] = get_cycles();
1262 if (ccnt < cycle_iters)
1263 last_poll_cycles_start[ccnt] =
1265 ne = ib_poll_cq(cb->cq, 1, &wc);
1267 if (ccnt < cycle_iters)
1268 poll_cycles_stop[ccnt] = get_cycles();
1272 PRINTF(cb, "poll CQ failed %d\n", ne);
1275 if (wc.status != IB_WC_SUCCESS) {
1277 "Completion wth error at %s:\n",
1278 cb->server ? "server" : "client");
1280 "Failed status %d: wr_id %d\n",
1281 wc.status, (int) wc.wr_id);
1283 "scnt=%d, rcnt=%d, ccnt=%d\n",
1289 microtime(&stop_tv);
/* Normalize the timeval difference before printing. */
1291 if (stop_tv.tv_usec < start_tv.tv_usec) {
1292 stop_tv.tv_usec += 1000000;
1293 stop_tv.tv_sec -= 1;
1296 for (i=0; i < cycle_iters; i++) {
1297 sum_post += post_cycles_stop[i] - post_cycles_start[i];
1298 sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
1299 sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
1302 "delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
1303 " sum_post %llu sum_poll %llu sum_last_poll %llu\n",
1304 stop_tv.tv_sec - start_tv.tv_sec,
1305 stop_tv.tv_usec - start_tv.tv_usec,
1306 scnt, cb->size, cycle_iters,
1307 (unsigned long long)sum_post, (unsigned long long)sum_poll,
1308 (unsigned long long)sum_last_poll);
1309 kfree(post_cycles_start);
1310 kfree(post_cycles_stop);
1311 kfree(poll_cycles_start);
1312 kfree(poll_cycles_stop);
1313 kfree(last_poll_cycles_start);
/*
 * Bandwidth test: keep up to cb->txdepth RDMA writes in flight and
 * reap completions from the CQ.  Same cycle-count instrumentation as
 * wlat_test, but without waiting on the peer's buffer between sends.
 */
1318 static void bw_test(struct krping_cb *cb)
1320 int ccnt, scnt, rcnt;
1321 int iters=cb->count;
1322 struct timeval start_tv, stop_tv;
1323 cycles_t *post_cycles_start, *post_cycles_stop;
1324 cycles_t *poll_cycles_start, *poll_cycles_stop;
1325 cycles_t *last_poll_cycles_start;
1326 cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
1328 int cycle_iters = 1000;
1332 post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1333 if (!post_cycles_start) {
1334 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1337 post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1338 if (!post_cycles_stop) {
1339 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1342 poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1343 if (!poll_cycles_start) {
1344 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1347 poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1348 if (!poll_cycles_stop) {
1349 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1352 last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
1354 if (!last_poll_cycles_start) {
1355 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1358 cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
1359 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
1360 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
1361 cb->rdma_sq_wr.sg_list->length = cb->size;
1363 if (cycle_iters > iters)
1364 cycle_iters = iters;
1365 microtime(&start_tv);
1366 while (scnt < iters || ccnt < iters) {
/* Fill the pipeline up to the configured send-queue depth. */
1368 while (scnt < iters && scnt - ccnt < cb->txdepth) {
1369 struct ib_send_wr *bad_wr;
1371 if (scnt < cycle_iters)
1372 post_cycles_start[scnt] = get_cycles();
1373 if (ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr)) {
1375 "Couldn't post send: scnt=%d\n",
1379 if (scnt < cycle_iters)
1380 post_cycles_stop[scnt] = get_cycles();
1388 if (ccnt < cycle_iters)
1389 poll_cycles_start[ccnt] = get_cycles();
1391 if (ccnt < cycle_iters)
1392 last_poll_cycles_start[ccnt] =
1394 ne = ib_poll_cq(cb->cq, 1, &wc);
1396 if (ccnt < cycle_iters)
1397 poll_cycles_stop[ccnt] = get_cycles();
1401 PRINTF(cb, "poll CQ failed %d\n", ne);
1404 if (wc.status != IB_WC_SUCCESS) {
1406 "Completion wth error at %s:\n",
1407 cb->server ? "server" : "client");
1409 "Failed status %d: wr_id %d\n",
1410 wc.status, (int) wc.wr_id);
1415 microtime(&stop_tv);
/* Normalize the timeval difference before printing. */
1417 if (stop_tv.tv_usec < start_tv.tv_usec) {
1418 stop_tv.tv_usec += 1000000;
1419 stop_tv.tv_sec -= 1;
1422 for (i=0; i < cycle_iters; i++) {
1423 sum_post += post_cycles_stop[i] - post_cycles_start[i];
1424 sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
1425 sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
1428 "delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
1429 " sum_post %llu sum_poll %llu sum_last_poll %llu\n",
1430 stop_tv.tv_sec - start_tv.tv_sec,
1431 stop_tv.tv_usec - start_tv.tv_usec,
1432 scnt, cb->size, cycle_iters,
1433 (unsigned long long)sum_post, (unsigned long long)sum_poll,
1434 (unsigned long long)sum_last_poll);
1435 kfree(post_cycles_start);
1436 kfree(post_cycles_stop);
1437 kfree(poll_cycles_start);
1438 kfree(poll_cycles_stop);
1439 kfree(last_poll_cycles_start);
/*
 * krping_rlat_test_server - server side of the read-latency test.
 * Waits for the client's buffer advertisement, replies with the
 * server's STAG/TO/Len, then sleeps until the connection goes to
 * ERROR while the client drives RDMA READs against us.
 */
1442 static void krping_rlat_test_server(struct krping_cb *cb)
1444 struct ib_send_wr *bad_wr;
/* Busy-poll the CQ directly until the client's advert arrives. */
1448 /* Spin waiting for client's Start STAG/TO/Len */
1449 while (cb->state < RDMA_READ_ADV) {
1450 krping_cq_event_handler(cb->cq, cb);
1453 /* Send STAG/TO/Len to client */
1454 krping_format_send(cb, cb->start_dma_addr);
1455 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1457 PRINTF(cb, "post send error %d\n", ret);
1461 /* Spin waiting for send completion */
/* NOTE(review): precedence bug — "ret = ib_poll_cq(...) == 0" stores
 * the *comparison* result in ret, not the poll return value, so a
 * later error check on ret sees 0. Likely intended
 * "((ret = ib_poll_cq(...)) == 0)". Same pattern recurs in the other
 * *_test_server/_client spin loops. */
1462 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1464 PRINTF(cb, "poll error %d\n", ret);
/* NOTE(review): message typo "completiong" -> "completion". */
1468 PRINTF(cb, "send completiong error %d\n", wc.status);
/* Park until the client disconnects (state forced to ERROR). */
1472 wait_event_interruptible(cb->sem, cb->state == ERROR);
/*
 * krping_wlat_test_server - server side of the write-latency test.
 * Mirrors krping_rlat_test_server: receive the client's advert, send
 * back our STAG/TO/Len, then wait for the session to end while the
 * client runs the write-latency exchange.
 */
1475 static void krping_wlat_test_server(struct krping_cb *cb)
1477 struct ib_send_wr *bad_wr;
/* Busy-poll the CQ until the client's advert has been received. */
1481 /* Spin waiting for client's Start STAG/TO/Len */
1482 while (cb->state < RDMA_READ_ADV) {
1483 krping_cq_event_handler(cb->cq, cb);
1486 /* Send STAG/TO/Len to client */
1487 krping_format_send(cb, cb->start_dma_addr);
1488 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1490 PRINTF(cb, "post send error %d\n", ret);
1494 /* Spin waiting for send completion */
/* NOTE(review): same precedence bug as the rlat server — ret holds
 * the "== 0" comparison, not the ib_poll_cq() return. */
1495 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1497 PRINTF(cb, "poll error %d\n", ret);
/* NOTE(review): message typo "completiong" -> "completion". */
1501 PRINTF(cb, "send completiong error %d\n", wc.status);
/* Park until the client disconnects. */
1506 wait_event_interruptible(cb->sem, cb->state == ERROR);
/*
 * krping_bw_test_server - server side of the bandwidth test.
 * Same handshake as the latency servers: take the client's advert,
 * reply with our STAG/TO/Len, then idle while the client streams
 * RDMA WRITEs (see bw_test()).
 */
1509 static void krping_bw_test_server(struct krping_cb *cb)
1511 struct ib_send_wr *bad_wr;
/* Busy-poll the CQ until the client's advert has been received. */
1515 /* Spin waiting for client's Start STAG/TO/Len */
1516 while (cb->state < RDMA_READ_ADV) {
1517 krping_cq_event_handler(cb->cq, cb);
1520 /* Send STAG/TO/Len to client */
1521 krping_format_send(cb, cb->start_dma_addr);
1522 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1524 PRINTF(cb, "post send error %d\n", ret);
1528 /* Spin waiting for send completion */
/* NOTE(review): same precedence bug — ret receives the "== 0"
 * comparison result, not the ib_poll_cq() return value. */
1529 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1531 PRINTF(cb, "poll error %d\n", ret);
/* NOTE(review): message typo "completiong" -> "completion". */
1535 PRINTF(cb, "send completiong error %d\n", wc.status);
/* Park until the client disconnects. */
1541 wait_event_interruptible(cb->sem, cb->state == ERROR);
/*
 * fastreg_supported - check whether the connected device supports
 * fast-register memory regions.
 * Queries device attributes via ib_query_device() and tests the
 * IB_DEVICE_MEM_MGT_EXTENSIONS capability bit; logs the flags either
 * way (return statements are not visible in this extracted chunk,
 * but callers treat nonzero as "supported").
 */
1544 static int fastreg_supported(struct krping_cb *cb)
1546 struct ib_device *dev = cb->child_cm_id->device;
1547 struct ib_device_attr attr;
1550 ret = ib_query_device(dev, &attr);
1552 PRINTF(cb, "ib_query_device failed ret %d\n", ret);
1555 if (!(attr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
1556 PRINTF(cb, "Fastreg not supported - device_cap_flags 0x%x\n",
1557 attr.device_cap_flags);
1560 DEBUG_LOG(cb, "Fastreg supported - device_cap_flags 0x%x\n",
1561 attr.device_cap_flags);
/*
 * krping_bind_server - bind the listening cm_id to the configured
 * local IPv4 address/port, start listening (backlog 3), and block
 * until a CONNECT_REQUEST arrives from a client.  Also verifies
 * fastreg support when mem mode FASTREG was requested.
 */
1565 static int krping_bind_server(struct krping_cb *cb)
1567 struct sockaddr_in sin;
1570 memset(&sin, 0, sizeof(sin));
/* sin_len is the BSD sockaddr convention (not present on Linux). */
1571 sin.sin_len = sizeof sin;
1572 sin.sin_family = AF_INET;
1573 sin.sin_addr.s_addr = cb->addr.s_addr;
/* cb->port is already in network byte order (htons in krping_doit). */
1574 sin.sin_port = cb->port;
1576 ret = rdma_bind_addr(cb->cm_id, (struct sockaddr *) &sin);
1578 PRINTF(cb, "rdma_bind_addr error %d\n", ret);
1581 DEBUG_LOG(cb, "rdma_bind_addr successful\n");
1583 DEBUG_LOG(cb, "rdma_listen\n");
1584 ret = rdma_listen(cb->cm_id, 3);
1586 PRINTF(cb, "rdma_listen failed: %d\n", ret);
/* Sleep until the CM event handler advances the state machine. */
1590 wait_event_interruptible(cb->sem, cb->state >= CONNECT_REQUEST);
1591 if (cb->state != CONNECT_REQUEST) {
1592 PRINTF(cb, "wait for CONNECT_REQUEST state %d\n",
/* FASTREG mem mode is only usable if the device advertises it. */
1597 if (cb->mem == FASTREG && !fastreg_supported(cb))
/*
 * krping_run_server - top-level server flow: bind/listen and accept a
 * connection, set up the QP and data buffers, pre-post one receive,
 * then dispatch to the requested test (wlat/rlat/bw/ping).  Tears
 * down the child connection and resources afterwards.
 */
1603 static void krping_run_server(struct krping_cb *cb)
1605 struct ib_recv_wr *bad_wr;
1608 ret = krping_bind_server(cb);
/* QP is created on the child cm_id delivered by CONNECT_REQUEST. */
1612 ret = krping_setup_qp(cb, cb->child_cm_id);
1614 PRINTF(cb, "setup_qp failed: %d\n", ret);
1618 ret = krping_setup_buffers(cb);
1620 PRINTF(cb, "krping_setup_buffers failed: %d\n", ret);
/* Post a receive before accepting so the client's first send lands. */
1624 ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
1626 PRINTF(cb, "ib_post_recv failed: %d\n", ret);
1630 ret = krping_accept(cb);
1632 PRINTF(cb, "connect error %d\n", ret);
/* Exactly one test runs per session, chosen by the parsed options. */
1637 krping_wlat_test_server(cb);
1639 krping_rlat_test_server(cb);
1641 krping_bw_test_server(cb);
1643 krping_test_server(cb);
1644 rdma_disconnect(cb->child_cm_id);
1646 krping_free_buffers(cb);
1650 rdma_destroy_id(cb->child_cm_id);
/*
 * krping_test_client - the classic rping loop, client side.
 * Each iteration: fill the start buffer with "rdma-ping-N: " plus a
 * rolling ASCII pattern, advertise it to the server (which RDMA-READs
 * it), wait for the ACK, advertise the sink buffer (which the server
 * RDMA-WRITEs), then optionally validate/print the echoed data.
 * Runs cb->count iterations, or forever when count is 0.
 */
1653 static void krping_test_client(struct krping_cb *cb)
1655 int ping, start, cc, i, ret;
1656 struct ib_send_wr *bad_wr;
1660 for (ping = 0; !cb->count || ping < cb->count; ping++) {
1661 cb->state = RDMA_READ_ADV;
1663 /* Put some ascii text in the buffer. */
1664 cb->start_buf = sprintf-filled below; cc = header length. */
/* NOTE(review): the line above is a reviewer note; original code follows. */
1664 cc = sprintf(cb->start_buf, "rdma-ping-%d: ", ping);
/* 'c'/'start' bound the rolling character pattern (declarations and
 * wrap-around logic are in lines not visible in this chunk). */
1665 for (i = cc, c = start; i < cb->size; i++) {
1666 cb->start_buf[i] = c;
1674 cb->start_buf[cb->size - 1] = 0;
/* Advertise the source buffer for the server's RDMA READ. */
1676 krping_format_send(cb, cb->start_dma_addr);
1677 if (cb->state == ERROR) {
1678 PRINTF(cb, "krping_format_send failed\n");
1681 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1683 PRINTF(cb, "post send error %d\n", ret);
1687 /* Wait for server to ACK */
1688 wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
1689 if (cb->state != RDMA_WRITE_ADV) {
1691 "wait for RDMA_WRITE_ADV state %d\n",
/* Advertise the sink buffer for the server's RDMA WRITE. */
1696 krping_format_send(cb, cb->rdma_dma_addr);
1697 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1699 PRINTF(cb, "post send error %d\n", ret);
1703 /* Wait for the server to say the RDMA Write is complete. */
1704 wait_event_interruptible(cb->sem,
1705 cb->state >= RDMA_WRITE_COMPLETE);
1706 if (cb->state != RDMA_WRITE_COMPLETE) {
1708 "wait for RDMA_WRITE_COMPLETE state %d\n",
/* Optional data validation: echoed buffer must match the source. */
1714 if (memcmp(cb->start_buf, cb->rdma_buf, cb->size)) {
1715 PRINTF(cb, "data mismatch!\n");
1720 PRINTF(cb, "ping data: %s\n", cb->rdma_buf);
/* Give the server up to 1s to wind down before the next iteration. */
1722 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
/*
 * krping_rlat_test_client - client side of the read-latency test.
 * Advertises the start buffer to the server, waits for the server's
 * advert in return, then (in the inner scope below) times 100000
 * zero-byte RDMA WRITEs to report 0B write latency.
 */
1727 static void krping_rlat_test_client(struct krping_cb *cb)
1729 struct ib_send_wr *bad_wr;
1733 cb->state = RDMA_READ_ADV;
/* NOTE(review): comment says "client" but this sends to the server. */
1735 /* Send STAG/TO/Len to client */
1736 krping_format_send(cb, cb->start_dma_addr);
1737 if (cb->state == ERROR) {
1738 PRINTF(cb, "krping_format_send failed\n");
1741 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1743 PRINTF(cb, "post send error %d\n", ret);
1747 /* Spin waiting for send completion */
/* NOTE(review): precedence bug — ret receives the "== 0" comparison
 * result, not the ib_poll_cq() return value. */
1748 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1750 PRINTF(cb, "poll error %d\n", ret);
1754 PRINTF(cb, "send completion error %d\n", wc.status);
1758 /* Spin waiting for server's Start STAG/TO/Len */
1759 while (cb->state < RDMA_WRITE_ADV) {
1760 krping_cq_event_handler(cb->cq, cb);
/* Inner scope: 0-byte RDMA WRITE latency measurement. */
1766 struct timeval start, stop;
1769 unsigned long long elapsed;
1771 struct ib_send_wr *bad_wr;
/* Zero-length WRITE: no SGE, so only the WR round-trip is timed. */
1774 cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
1775 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
1776 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
1777 cb->rdma_sq_wr.sg_list->length = 0;
1778 cb->rdma_sq_wr.num_sge = 0;
1781 for (i=0; i < 100000; i++) {
1782 if (ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr)) {
1783 PRINTF(cb, "Couldn't post send\n");
1787 ne = ib_poll_cq(cb->cq, 1, &wc);
1790 PRINTF(cb, "poll CQ failed %d\n", ne);
1793 if (wc.status != IB_WC_SUCCESS) {
1794 PRINTF(cb, "Completion wth error at %s:\n",
1795 cb->server ? "server" : "client");
1796 PRINTF(cb, "Failed status %d: wr_id %d\n",
1797 wc.status, (int) wc.wr_id);
/* Normalize timeval subtraction (borrow a second on usec underflow). */
1803 if (stop.tv_usec < start.tv_usec) {
1804 stop.tv_usec += 1000000;
1807 sec = stop.tv_sec - start.tv_sec;
1808 usec = stop.tv_usec - start.tv_usec;
1809 elapsed = sec * 1000000 + usec;
1810 PRINTF(cb, "0B-write-lat iters 100000 usec %llu\n", elapsed);
/*
 * krping_wlat_test_client - client side of the write-latency test.
 * Exchanges buffer adverts with the server (send ours, spin until the
 * server's arrives); the actual timing loop is in lines not visible
 * in this extracted chunk (wlat_test()).
 */
1817 static void krping_wlat_test_client(struct krping_cb *cb)
1819 struct ib_send_wr *bad_wr;
1823 cb->state = RDMA_READ_ADV;
/* NOTE(review): comment says "client" but this sends to the server. */
1825 /* Send STAG/TO/Len to client */
1826 krping_format_send(cb, cb->start_dma_addr);
1827 if (cb->state == ERROR) {
1828 PRINTF(cb, "krping_format_send failed\n");
1831 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1833 PRINTF(cb, "post send error %d\n", ret);
1837 /* Spin waiting for send completion */
/* NOTE(review): precedence bug — ret receives the "== 0" comparison
 * result, not the ib_poll_cq() return value. */
1838 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1840 PRINTF(cb, "poll error %d\n", ret);
1844 PRINTF(cb, "send completion error %d\n", wc.status);
1848 /* Spin waiting for server's Start STAG/TO/Len */
1849 while (cb->state < RDMA_WRITE_ADV) {
1850 krping_cq_event_handler(cb->cq, cb);
/*
 * krping_bw_test_client - client side of the bandwidth test.
 * Same advert handshake as the latency clients; the measurement
 * itself is done by bw_test() (called from lines not visible here).
 */
1856 static void krping_bw_test_client(struct krping_cb *cb)
1858 struct ib_send_wr *bad_wr;
1862 cb->state = RDMA_READ_ADV;
/* NOTE(review): comment says "client" but this sends to the server. */
1864 /* Send STAG/TO/Len to client */
1865 krping_format_send(cb, cb->start_dma_addr);
1866 if (cb->state == ERROR) {
1867 PRINTF(cb, "krping_format_send failed\n");
1870 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1872 PRINTF(cb, "post send error %d\n", ret);
1876 /* Spin waiting for send completion */
/* NOTE(review): precedence bug — ret receives the "== 0" comparison
 * result, not the ib_poll_cq() return value. */
1877 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1879 PRINTF(cb, "poll error %d\n", ret);
1883 PRINTF(cb, "send completion error %d\n", wc.status);
1887 /* Spin waiting for server's Start STAG/TO/Len */
1888 while (cb->state < RDMA_WRITE_ADV) {
1889 krping_cq_event_handler(cb->cq, cb);
/*
 * krping_fr_test - stress fast-register/invalidate work requests.
 * Allocates a fast-reg MR and page list sized for cb->size, then in a
 * loop posts FAST_REG_MR + LOCAL_INV WR pairs with randomized lengths
 * (keeping up to txdepth/2 in flight), pausing one second roughly
 * every 9 seconds of uptime, until interrupted; finally drains the CQ.
 */
1895 static void krping_fr_test(struct krping_cb *cb)
1897 struct ib_fast_reg_page_list *pl;
1898 struct ib_send_wr fr, inv, *bad;
1904 int size = cb->size;
/* Round size up to whole pages to get the page-list length. */
1905 int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
1910 pl = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
1912 PRINTF(cb, "ib_alloc_fast_reg_page_list failed %ld\n", PTR_ERR(pl));
1916 mr = ib_alloc_fast_reg_mr(cb->pd, plen);
/* NOTE(review): reports PTR_ERR(pl) for an mr failure — should
 * almost certainly be PTR_ERR(mr). */
1918 PRINTF(cb, "ib_alloc_fast_reg_mr failed %ld\n", PTR_ERR(pl));
/* Fill the page list with dummy (non-DMA) addresses; the HW never
 * dereferences them in this test. */
1922 for (i=0; i<plen; i++)
1923 pl->page_list[i] = 0xcafebabe | i;
1925 memset(&fr, 0, sizeof fr);
1926 fr.opcode = IB_WR_FAST_REG_MR;
1927 fr.wr.fast_reg.page_shift = PAGE_SHIFT;
1928 fr.wr.fast_reg.length = size;
1929 fr.wr.fast_reg.page_list = pl;
1930 fr.wr.fast_reg.page_list_len = plen;
1931 fr.wr.fast_reg.iova_start = 0;
1932 fr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
/* Signaled LOCAL_INV paired with each FAST_REG so completions flow. */
1934 memset(&inv, 0, sizeof inv);
1935 inv.opcode = IB_WR_LOCAL_INV;
1936 inv.send_flags = IB_SEND_SIGNALED;
1938 DEBUG_LOG(cb, "fr_test: stag index 0x%x plen %u size %u depth %u\n", mr->rkey >> 8, plen, cb->size, cb->txdepth);
1939 start = time_uptime;
/* Every ~9s of uptime, pause 1s (or exit if the state went ERROR). */
1941 if ((time_uptime - start) >= 9) {
1942 DEBUG_LOG(cb, "fr_test: pausing 1 second! count %u latest size %u plen %u\n", count, size, plen);
1943 wait_event_interruptible(cb->sem, cb->state == ERROR);
1944 if (cb->state == ERROR)
1946 start = time_uptime;
/* Keep up to half the tx depth outstanding (each iteration posts a
 * FAST_REG + LOCAL_INV pair). */
1948 while (scnt < (cb->txdepth>>1)) {
/* New rkey per registration so stale keys can't match. */
1949 ib_update_fast_reg_key(mr, ++key);
1950 fr.wr.fast_reg.rkey = mr->rkey;
1951 inv.ex.invalidate_rkey = mr->rkey;
/* Randomize the registered length each round (0..cb->size-1). */
1952 size = arc4random() % cb->size;
1955 plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
1956 fr.wr.fast_reg.length = size;
1957 fr.wr.fast_reg.page_list_len = plen;
1958 ret = ib_post_send(cb->qp, &fr, &bad);
1960 PRINTF(cb, "ib_post_send failed %d\n", ret);
1967 ret = ib_poll_cq(cb->cq, 1, &wc);
1969 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
1974 PRINTF(cb, "completion error %u\n", wc.status);
1980 else if (krping_sigpending()) {
1981 PRINTF(cb, "signal!\n");
1988 DEBUG_LOG(cb, "sleeping 1 second\n");
1989 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
/* Reap any completions still in flight before freeing resources. */
1991 DEBUG_LOG(cb, "draining the cq...\n");
1993 ret = ib_poll_cq(cb->cq, 1, &wc);
1995 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
2000 PRINTF(cb, "completion error %u opcode %u\n", wc.status, wc.opcode);
2004 DEBUG_LOG(cb, "fr_test: done!\n");
2007 ib_free_fast_reg_page_list(pl);
/*
 * krping_connect_client - issue rdma_connect() on the client cm_id
 * with fixed connection parameters (1 responder resource, initiator
 * depth 1, 10 retries) and block until the CM handler reports
 * CONNECTED (or ERROR).
 */
2010 static int krping_connect_client(struct krping_cb *cb)
2012 struct rdma_conn_param conn_param;
2015 memset(&conn_param, 0, sizeof conn_param);
2016 conn_param.responder_resources = 1;
2017 conn_param.initiator_depth = 1;
2018 conn_param.retry_count = 10;
2020 ret = rdma_connect(cb->cm_id, &conn_param);
2022 PRINTF(cb, "rdma_connect error %d\n", ret);
/* The CM event handler wakes cb->sem as the state machine advances. */
2026 wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
2027 if (cb->state == ERROR) {
2028 PRINTF(cb, "wait for CONNECTED state %d\n", cb->state);
2032 DEBUG_LOG(cb, "rdma_connect successful\n");
/*
 * krping_bind_client - resolve the server's IPv4 address and route on
 * the client cm_id, then block until resolution completes.  Also
 * verifies fastreg support when mem mode FASTREG was requested.
 */
2036 static int krping_bind_client(struct krping_cb *cb)
2038 struct sockaddr_in sin;
2041 memset(&sin, 0, sizeof(sin));
/* sin_len is the BSD sockaddr convention (not present on Linux). */
2042 sin.sin_len = sizeof sin;
2043 sin.sin_family = AF_INET;
2044 sin.sin_addr.s_addr = cb->addr.s_addr;
/* cb->port is already in network byte order (htons in krping_doit). */
2045 sin.sin_port = cb->port;
2047 ret = rdma_resolve_addr(cb->cm_id, NULL, (struct sockaddr *) &sin,
2050 PRINTF(cb, "rdma_resolve_addr error %d\n", ret);
/* ROUTE_RESOLVED is set by the CM handler after both addr and route
 * resolution events have fired. */
2054 wait_event_interruptible(cb->sem, cb->state >= ROUTE_RESOLVED);
2055 if (cb->state != ROUTE_RESOLVED) {
2057 "addr/route resolution did not resolve: state %d\n",
/* FASTREG mem mode is only usable if the device advertises it. */
2062 if (cb->mem == FASTREG && !fastreg_supported(cb))
2065 DEBUG_LOG(cb, "rdma_resolve_addr - rdma_resolve_route successful\n");
/*
 * krping_run_client - top-level client flow: resolve and connect, set
 * up the QP and buffers, pre-post one receive, then dispatch to the
 * requested test (wlat/rlat/bw/fr/ping).  Disconnects and frees
 * resources afterwards.
 */
2069 static void krping_run_client(struct krping_cb *cb)
2071 struct ib_recv_wr *bad_wr;
2074 ret = krping_bind_client(cb);
2078 ret = krping_setup_qp(cb, cb->cm_id);
2080 PRINTF(cb, "setup_qp failed: %d\n", ret);
2084 ret = krping_setup_buffers(cb);
2086 PRINTF(cb, "krping_setup_buffers failed: %d\n", ret);
/* Post a receive before connecting so the server's first send lands. */
2090 ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
2092 PRINTF(cb, "ib_post_recv failed: %d\n", ret);
2096 ret = krping_connect_client(cb);
2098 PRINTF(cb, "connect error %d\n", ret);
/* Exactly one test runs per session, chosen by the parsed options. */
2103 krping_wlat_test_client(cb);
2105 krping_rlat_test_client(cb);
2107 krping_bw_test_client(cb);
2108 else if (cb->frtest)
2111 krping_test_client(cb);
2112 rdma_disconnect(cb->cm_id);
2114 krping_free_buffers(cb);
/*
 * krping_doit - module entry point for one krping run.
 * Allocates a control block, registers it on the global list, parses
 * the option string in 'cmd' (address, port, role, size, count, mem
 * mode, test selection, ...), validates the option combination, then
 * creates the cm_id and runs as server or client.  'cookie' is the
 * caller's handle used by PRINTF/DEBUG_LOG for output.
 */
2119 int krping_doit(char *cmd, void *cookie)
2121 struct krping_cb *cb;
2125 unsigned long optint;
2127 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
/* Track every cb so krping_walk_cb_list() can report stats. */
2131 mutex_lock(&krping_mutex);
2132 list_add_tail(&cb->list, &krping_cbs);
2133 mutex_unlock(&krping_mutex);
2135 cb->cookie = cookie;
2139 cb->txdepth = RPING_SQ_DEPTH;
2141 init_waitqueue_head(&cb->sem);
/* Option parsing loop: one case per recognized krping option. */
2143 while ((op = krping_getopt("krping", &cmd, krping_opts, NULL, &optarg,
2147 cb->addr_str = optarg;
2148 DEBUG_LOG(cb, "ipaddr (%s)\n", optarg);
2149 if (!inet_aton(optarg, &cb->addr)) {
2150 PRINTF(cb, "bad addr string %s\n",
/* Stored in network byte order; used as-is by bind/resolve. */
2156 cb->port = htons(optint);
2157 DEBUG_LOG(cb, "port %d\n", (int)optint);
2161 DEBUG_LOG(cb, "server\n");
/* NOTE(review): this debug message duplicates the previous case's
 * "server" text; one of the two option cases (lines hidden in this
 * chunk) presumably differs — confirm against the option table. */
2165 DEBUG_LOG(cb, "server\n");
2169 DEBUG_LOG(cb, "client\n");
2173 if ((cb->size < 1) ||
2174 (cb->size > RPING_BUFSIZE)) {
2175 PRINTF(cb, "Invalid size %d "
2176 "(valid range is 1 to %d)\n",
2177 cb->size, RPING_BUFSIZE);
2180 DEBUG_LOG(cb, "size %d\n", (int)optint);
2184 if (cb->count < 0) {
2185 PRINTF(cb, "Invalid count %d\n",
2189 DEBUG_LOG(cb, "count %d\n", (int) cb->count);
2193 DEBUG_LOG(cb, "verbose\n");
2197 DEBUG_LOG(cb, "validate data\n");
/* Memory registration mode selection (dma/fastreg/mw/mr). */
2212 if (!strncmp(optarg, "dma", 3))
2214 else if (!strncmp(optarg, "fastreg", 7))
2216 else if (!strncmp(optarg, "mw", 2))
2218 else if (!strncmp(optarg, "mr", 2))
2221 PRINTF(cb, "unknown mem mode %s. "
2222 "Must be dma, fastreg, mw, or mr\n",
2229 cb->server_invalidate = 1;
2232 cb->txdepth = optint;
2233 DEBUG_LOG(cb, "txdepth %d\n", (int) cb->txdepth);
2236 cb->local_dma_lkey = 1;
2237 DEBUG_LOG(cb, "using local dma lkey\n");
2241 DEBUG_LOG(cb, "using read-with-inv\n");
2245 DEBUG_LOG(cb, "fast-reg test!\n");
2248 PRINTF(cb, "unknown opt %s\n", optarg);
/* Validate mutually-exclusive / dependent option combinations. */
2256 if (cb->server == -1) {
2257 PRINTF(cb, "must be either client or server\n");
2262 if (cb->server && cb->frtest) {
2263 PRINTF(cb, "must be client to run frtest\n");
2268 if ((cb->frtest + cb->bw + cb->rlat + cb->wlat) > 1) {
2269 PRINTF(cb, "Pick only one test: fr, bw, rlat, wlat\n");
2274 if (cb->server_invalidate && cb->mem != FASTREG) {
2275 PRINTF(cb, "server_invalidate only valid with fastreg mem_mode\n");
2280 if (cb->read_inv && cb->mem != FASTREG) {
2281 PRINTF(cb, "read_inv only valid with fastreg mem_mode\n");
2286 if (cb->mem != MR && (cb->wlat || cb->rlat || cb->bw)) {
2287 PRINTF(cb, "wlat, rlat, and bw tests only support mem_mode MR\n");
2292 cb->cm_id = rdma_create_id(krping_cma_event_handler, cb, RDMA_PS_TCP);
2293 if (IS_ERR(cb->cm_id)) {
2294 ret = PTR_ERR(cb->cm_id);
2295 PRINTF(cb, "rdma_create_id error %d\n", ret);
2298 DEBUG_LOG(cb, "created cm_id %p\n", cb->cm_id);
2301 krping_run_server(cb);
2303 krping_run_client(cb);
2305 DEBUG_LOG(cb, "destroy cm_id %p\n", cb->cm_id);
2306 rdma_destroy_id(cb->cm_id);
/* Unregister the cb before freeing (cleanup lines not visible). */
2308 mutex_lock(&krping_mutex);
2309 list_del(&cb->list);
2310 mutex_unlock(&krping_mutex);
/*
 * krping_walk_cb_list - invoke 'f' on every active control block's
 * stats (NULL stats for cbs whose PD was never created), holding
 * krping_mutex across the walk so entries cannot be removed mid-scan.
 */
2316 krping_walk_cb_list(void (*f)(struct krping_stats *, void *), void *arg)
2318 struct krping_cb *cb;
2320 mutex_lock(&krping_mutex);
2321 list_for_each_entry(cb, &krping_cbs, list)
2322 (*f)(cb->pd ? &cb->stats : NULL, arg);
2323 mutex_unlock(&krping_mutex);
2326 void krping_init(void)
2329 mutex_init(&krping_mutex);