2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2006-2009 Open Grid Computing, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include <linux/module.h>
38 #include <linux/moduleparam.h>
39 #include <linux/slab.h>
40 #include <linux/err.h>
41 #include <linux/string.h>
42 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/pci.h>
46 #include <linux/sched.h>
48 #include <asm/atomic.h>
50 #include <rdma/ib_verbs.h>
51 #include <rdma/rdma_cm.h>
56 extern int krping_debug;
57 #define DEBUG_LOG(cb, x...) if (krping_debug) krping_printf((cb)->cookie, x)
58 #define PRINTF(cb, x...) krping_printf((cb)->cookie, x)
60 MODULE_AUTHOR("Steve Wise");
61 MODULE_DESCRIPTION("RDMA ping client/server");
62 MODULE_LICENSE("Dual BSD/GPL");
/*
 * Read the x86 timestamp counter for cycle-granularity timing of the
 * wlat/bw tests.  NOTE(review): the function-name line and the local
 * declarations of 'low'/'high' fall outside this view — presumably
 * u_int32_t; confirm against the full file.
 */
static __inline uint64_t
__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
return (low | ((u_int64_t)high << 32));
typedef uint64_t cycles_t;
/*
 * Option table for the krping command string: maps each long option
 * name to its argument type (int / string / none) and the one-character
 * token the parser hands back.  NOTE(review): the terminating sentinel
 * entry and closing brace are outside this view.
 */
static const struct krping_option krping_opts[] = {
{"count", OPT_INT, 'C'},
{"size", OPT_INT, 'S'},
{"addr", OPT_STRING, 'a'},
{"port", OPT_INT, 'p'},
{"verbose", OPT_NOPARAM, 'v'},
{"validate", OPT_NOPARAM, 'V'},
{"server", OPT_NOPARAM, 's'},
{"client", OPT_NOPARAM, 'c'},
{"mem_mode", OPT_STRING, 'm'},
{"server_inv", OPT_NOPARAM, 'I'},
{"wlat", OPT_NOPARAM, 'l'},
{"rlat", OPT_NOPARAM, 'L'},
{"bw", OPT_NOPARAM, 'B'},
{"duplex", OPT_NOPARAM, 'd'},
{"txdepth", OPT_INT, 'T'},
{"poll", OPT_NOPARAM, 'P'},
{"local_dma_lkey", OPT_NOPARAM, 'Z'},
{"read_inv", OPT_NOPARAM, 'R'},
{"fr", OPT_NOPARAM, 'f'},
/*
 * 64-bit host <-> network byte-order helpers.  ntohll now uses
 * be64_to_cpu for the from-wire direction: the byte swap is its own
 * inverse, so this is behaviorally identical to the previous
 * cpu_to_be64 definition, but states the intent correctly.
 */
#define htonll(x) cpu_to_be64((x))
#define ntohll(x) be64_to_cpu((x))
/* Protects the global list of running krping threads below. */
static struct mutex krping_mutex;
/*
 * List of running krping threads.
 */
static LIST_HEAD(krping_cbs);
115 * krping "ping/pong" loop:
116 * client sends source rkey/addr/len
* server receives source rkey/addr/len
118 * server rdma reads "ping" data from source
119 * server sends "go ahead" on rdma read completion
120 * client sends sink rkey/addr/len
121 * server receives sink rkey/addr/len
122 * server rdma writes "pong" data to sink
123 * server sends "go ahead" on rdma write completion
128 * These states are used to signal events between the completion handler
129 * and the main client or server thread.
131 * Once CONNECTED, they cycle through RDMA_READ_ADV, RDMA_WRITE_ADV,
132 * and RDMA_WRITE_COMPLETE for each ping.
/*
 * On-the-wire advertisement exchanged over the send/recv buffers:
 * describes one side's RDMA buffer (rkey/addr/len, all in network byte
 * order).  Field declarations fall outside this view.
 */
struct krping_rdma_info {
/*
 * Default max buffer size for IO...
 * Parenthesized so the macro expands safely inside larger expressions
 * (e.g. `x % RPING_BUFSIZE` or `RPING_BUFSIZE + 1`); 128*1024 bare
 * would bind incorrectly under adjacent operators.
 */
#define RPING_BUFSIZE (128 * 1024)
#define RPING_SQ_DEPTH 64
 * Control block struct.
/* role flag */
int server;			/* 0 iff client */
/* MR covering all DMA memory, used in DMA mem_mode */
struct ib_mr *dma_mr;
/* fastreg (FRMR) resources: page list, LOCAL_INV + FAST_REG_MR WR chain,
 * and the fast-reg MR itself */
struct ib_fast_reg_page_list *page_list;
struct ib_send_wr fastreg_wr;
struct ib_send_wr invalidate_wr;
struct ib_mr *fastreg_mr;
/* nonzero: server sends SEND_WITH_INV to invalidate the client's rkey */
int server_invalidate;
/* memory-window bind attributes, used in MW mem_mode */
struct ib_mw_bind bind_attr;
/* receive side: one pre-posted recv for the peer's rdma_info */
struct ib_recv_wr rq_wr;	/* recv work request record */
struct ib_sge recv_sgl;		/* recv single SGE */
struct krping_rdma_info recv_buf;/* malloc'd buffer */
DECLARE_PCI_UNMAP_ADDR(recv_mapping)
struct ib_mr *recv_mr;
/* send side: advertises our rdma_info to the peer */
struct ib_send_wr sq_wr;	/* send work request record */
struct ib_sge send_sgl;
struct krping_rdma_info send_buf;/* single send buf */
DECLARE_PCI_UNMAP_ADDR(send_mapping)
struct ib_mr *send_mr;
/* RDMA READ/WRITE work request and its sink buffer */
struct ib_send_wr rdma_sq_wr;	/* rdma work request record */
struct ib_sge rdma_sgl;		/* rdma single SGE */
char *rdma_buf;			/* used as rdma sink */
DECLARE_PCI_UNMAP_ADDR(rdma_mapping)
struct ib_mr *rdma_mr;
/* peer's advertised buffer, parsed from recv_buf */
uint32_t remote_rkey;		/* remote guys RKEY */
uint64_t remote_addr;		/* remote guys TO */
uint32_t remote_len;		/* remote guys LEN */
/* source buffer for the peer's RDMA READ */
char *start_buf;		/* rdma read src */
DECLARE_PCI_UNMAP_ADDR(start_mapping)
struct ib_mr *start_mr;
enum test_state state;		/* used for cond/signalling */
wait_queue_head_t sem;
struct krping_stats stats;
/* parsed command-line configuration */
uint16_t port;			/* dst port in NBO */
struct in_addr addr;		/* dst addr in NBO */
char *addr_str;			/* dst addr string */
int verbose;			/* verbose logging */
int count;			/* ping count */
int size;			/* ping data size */
int validate;			/* validate ping data */
int wlat;			/* run wlat test */
int rlat;			/* run rlat test */
int bw;				/* run bw test */
int duplex;			/* run bw full duplex test */
int poll;			/* poll or block for rlat test */
int txdepth;			/* SQ depth */
int local_dma_lkey;		/* use 0 for lkey */
int frtest;			/* fastreg test */
struct rdma_cm_id *cm_id;	/* connection on client side,*/
/* listener on server side. */
struct rdma_cm_id *child_cm_id;	/* connection on server side */
struct list_head list;
/*
 * RDMA CM event callback.  Runs in CM callback context: records the
 * state transition on the cb and wakes the control thread sleeping on
 * cb->sem.  Error-class events drive the cb to an error state (the
 * assignment lines are elided from this view).  NOTE(review): several
 * break statements and the final return fall outside this view.
 */
static int krping_cma_event_handler(struct rdma_cm_id *cma_id,
struct rdma_cm_event *event)
struct krping_cb *cb = cma_id->context;
DEBUG_LOG(cb, "cma_event type %d cma_id %p (%s)\n", event->event,
cma_id, (cma_id == cb->cm_id) ? "parent" : "child");
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
cb->state = ADDR_RESOLVED;
/* chain straight into route resolution; 2000 ms timeout */
ret = rdma_resolve_route(cma_id, 2000);
PRINTF(cb, "rdma_resolve_route error %d\n", ret);
wake_up_interruptible(&cb->sem);
case RDMA_CM_EVENT_ROUTE_RESOLVED:
cb->state = ROUTE_RESOLVED;
/* NOTE(review): assigning child_cm_id on ROUTE_RESOLVED looks
 * unusual (it is normally set only on CONNECT_REQUEST) — confirm
 * against the full file. */
cb->child_cm_id = cma_id;
wake_up_interruptible(&cb->sem);
case RDMA_CM_EVENT_CONNECT_REQUEST:
cb->state = CONNECT_REQUEST;
/* server side: remember the per-connection cm_id */
cb->child_cm_id = cma_id;
DEBUG_LOG(cb, "child cma %p\n", cb->child_cm_id);
wake_up_interruptible(&cb->sem);
case RDMA_CM_EVENT_ESTABLISHED:
DEBUG_LOG(cb, "ESTABLISHED\n");
cb->state = CONNECTED;
wake_up_interruptible(&cb->sem);
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_REJECTED:
PRINTF(cb, "cma event %d, error %d\n", event->event,
wake_up_interruptible(&cb->sem);
case RDMA_CM_EVENT_DISCONNECTED:
PRINTF(cb, "DISCONNECT EVENT...\n");
wake_up_interruptible(&cb->sem);
case RDMA_CM_EVENT_DEVICE_REMOVAL:
PRINTF(cb, "cma detected device removal!!!!\n");
PRINTF(cb, "oof bad type!\n");
wake_up_interruptible(&cb->sem);
/*
 * Handle a recv completion on the server: validate the message length,
 * then unpack the client's advertised rkey/addr/len (network to host
 * order) and advance the state machine — a fresh advertisement means
 * "go read" (RDMA_READ_ADV), otherwise "go write" (RDMA_WRITE_ADV).
 * Returns 0 on success; the error-return lines are elided from view.
 */
static int server_recv(struct krping_cb *cb, struct ib_wc *wc)
if (wc->byte_len != sizeof(cb->recv_buf)) {
PRINTF(cb, "Received bogus data, size %d\n",
cb->remote_rkey = ntohl(cb->recv_buf.rkey);
cb->remote_addr = ntohll(cb->recv_buf.buf);
cb->remote_len = ntohl(cb->recv_buf.size);
DEBUG_LOG(cb, "Received rkey %x addr %llx len %d from peer\n",
cb->remote_rkey, (unsigned long long)cb->remote_addr,
if (cb->state <= CONNECTED || cb->state == RDMA_WRITE_COMPLETE)
cb->state = RDMA_READ_ADV;
cb->state = RDMA_WRITE_ADV;
/*
 * Handle a recv completion on the client: each "go ahead" from the
 * server advances the ping state machine — after advertising the read
 * source we move to RDMA_WRITE_ADV, and after advertising the write
 * sink we move to RDMA_WRITE_COMPLETE.  Length check guards against
 * short/garbled messages.
 */
static int client_recv(struct krping_cb *cb, struct ib_wc *wc)
if (wc->byte_len != sizeof(cb->recv_buf)) {
PRINTF(cb, "Received bogus data, size %d\n",
if (cb->state == RDMA_READ_ADV)
cb->state = RDMA_WRITE_ADV;
cb->state = RDMA_WRITE_COMPLETE;
/*
 * Completion-queue callback shared by client and server.  Re-arms the
 * CQ (except in the polling wlat/rlat/bw modes), drains completions,
 * accumulates byte/message statistics per opcode, advances cb->state,
 * and wakes the control thread.  Recv completions are dispatched to
 * server_recv()/client_recv() and the single recv WR is re-posted.
 */
static void krping_cq_event_handler(struct ib_cq *cq, void *ctx)
struct krping_cb *cb = ctx;
struct ib_recv_wr *bad_wr;
BUG_ON(cb->cq != cq);
if (cb->state == ERROR) {
PRINTF(cb, "cq completion in ERROR state\n");
/* frtest drives the CQ itself; an async completion here is a bug */
PRINTF(cb, "cq completion event in frtest!\n");
if (!cb->wlat && !cb->rlat && !cb->bw)
ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 1) {
/* flush errors are expected at teardown, not a failure */
if (wc.status == IB_WC_WR_FLUSH_ERR) {
DEBUG_LOG(cb, "cq flushed\n");
PRINTF(cb, "cq completion failed with "
"wr_id %Lx status %d opcode %d vender_err %x\n",
wc.wr_id, wc.status, wc.opcode, wc.vendor_err);
DEBUG_LOG(cb, "send completion\n");
cb->stats.send_bytes += cb->send_sgl.length;
cb->stats.send_msgs++;
case IB_WC_RDMA_WRITE:
DEBUG_LOG(cb, "rdma write completion\n");
cb->stats.write_bytes += cb->rdma_sq_wr.sg_list->length;
cb->stats.write_msgs++;
cb->state = RDMA_WRITE_COMPLETE;
wake_up_interruptible(&cb->sem);
case IB_WC_RDMA_READ:
DEBUG_LOG(cb, "rdma read completion\n");
cb->stats.read_bytes += cb->rdma_sq_wr.sg_list->length;
cb->stats.read_msgs++;
cb->state = RDMA_READ_COMPLETE;
wake_up_interruptible(&cb->sem);
DEBUG_LOG(cb, "recv completion\n");
cb->stats.recv_bytes += sizeof(cb->recv_buf);
cb->stats.recv_msgs++;
/* in the timing tests the server handler parses every recv */
if (cb->wlat || cb->rlat || cb->bw)
ret = server_recv(cb, &wc);
ret = cb->server ? server_recv(cb, &wc) :
client_recv(cb, &wc);
PRINTF(cb, "recv wc error: %d\n", ret);
/* replenish the single pre-posted recv buffer */
ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
PRINTF(cb, "post recv error: %d\n",
wake_up_interruptible(&cb->sem);
"%s:%d Unexpected opcode %d, Shutting down\n",
__func__, __LINE__, wc.opcode);
PRINTF(cb, "poll error %d\n", ret);
wake_up_interruptible(&cb->sem);
/*
 * Accept the pending client connection on the server's child cm_id and,
 * for the plain ping mode, block until the CM callback reports
 * CONNECTED (the timing tests synchronize elsewhere).  Returns 0 on
 * success; error-return lines are elided from this view.
 */
static int krping_accept(struct krping_cb *cb)
struct rdma_conn_param conn_param;
DEBUG_LOG(cb, "accepting client connection request\n");
memset(&conn_param, 0, sizeof conn_param);
conn_param.responder_resources = 1;
conn_param.initiator_depth = 1;
ret = rdma_accept(cb->child_cm_id, &conn_param);
PRINTF(cb, "rdma_accept error: %d\n", ret);
if (!cb->wlat && !cb->rlat && !cb->bw) {
wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
if (cb->state == ERROR) {
PRINTF(cb, "wait for CONNECTED state %d\n",
/*
 * Pre-build the reusable work requests: the recv WR, the send WR, the
 * RDMA read/write WR, and (per mem_mode) either the fastreg WR chain or
 * the MW bind attributes.  The lkey chosen for each SGE depends on the
 * memory registration mode (device local_dma_lkey, DMA MR, or per-buffer
 * MR).
 */
static void krping_setup_wr(struct krping_cb *cb)
cb->recv_sgl.addr = cb->recv_dma_addr;
cb->recv_sgl.length = sizeof cb->recv_buf;
if (cb->local_dma_lkey)
cb->recv_sgl.lkey = cb->qp->device->local_dma_lkey;
else if (cb->mem == DMA)
cb->recv_sgl.lkey = cb->dma_mr->lkey;
cb->recv_sgl.lkey = cb->recv_mr->lkey;
cb->rq_wr.sg_list = &cb->recv_sgl;
cb->rq_wr.num_sge = 1;
cb->send_sgl.addr = cb->send_dma_addr;
cb->send_sgl.length = sizeof cb->send_buf;
if (cb->local_dma_lkey)
cb->send_sgl.lkey = cb->qp->device->local_dma_lkey;
else if (cb->mem == DMA)
cb->send_sgl.lkey = cb->dma_mr->lkey;
cb->send_sgl.lkey = cb->send_mr->lkey;
cb->sq_wr.opcode = IB_WR_SEND;
cb->sq_wr.send_flags = IB_SEND_SIGNALED;
cb->sq_wr.sg_list = &cb->send_sgl;
cb->sq_wr.num_sge = 1;
if (cb->server || cb->wlat || cb->rlat || cb->bw) {
cb->rdma_sgl.addr = cb->rdma_dma_addr;
cb->rdma_sgl.lkey = cb->rdma_mr->lkey;
cb->rdma_sq_wr.send_flags = IB_SEND_SIGNALED;
cb->rdma_sq_wr.sg_list = &cb->rdma_sgl;
cb->rdma_sq_wr.num_sge = 1;
 * A chain of 2 WRs, INVALIDATE_MR + FAST_REG_MR.
 * both unsignaled. The client uses them to reregister
 * the rdma buffers with a new key each iteration.
cb->fastreg_wr.opcode = IB_WR_FAST_REG_MR;
cb->fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
cb->fastreg_wr.wr.fast_reg.length = cb->size;
cb->fastreg_wr.wr.fast_reg.page_list = cb->page_list;
cb->fastreg_wr.wr.fast_reg.page_list_len = cb->page_list_len;
cb->invalidate_wr.next = &cb->fastreg_wr;
cb->invalidate_wr.opcode = IB_WR_LOCAL_INV;
/* MW mode: bind attributes reused (and re-targeted) per iteration */
cb->bind_attr.wr_id = 0xabbaabba;
cb->bind_attr.send_flags = 0; /* unsignaled */
cb->bind_attr.length = cb->size;
/*
 * Allocate and DMA-map all test buffers, and register memory according
 * to cb->mem: DMA (one global DMA MR), per-buffer phys MRs, FASTREG
 * (page list + fast-reg MR), or MW (memory window).  The client (and
 * the timing tests) additionally allocate start_buf, the RDMA READ
 * source.  On failure the tail of the function unwinds every resource
 * acquired so far.  Returns 0 on success; several error-path jumps and
 * returns are elided from this view.
 */
static int krping_setup_buffers(struct krping_cb *cb)
struct ib_phys_buf buf;
DEBUG_LOG(cb, "krping_setup_buffers called on cb %p\n", cb);
cb->recv_dma_addr = dma_map_single(cb->pd->device->dma_device,
sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
pci_unmap_addr_set(cb, recv_mapping, cb->recv_dma_addr);
cb->send_dma_addr = dma_map_single(cb->pd->device->dma_device,
&cb->send_buf, sizeof(cb->send_buf),
pci_unmap_addr_set(cb, send_mapping, cb->send_dma_addr);
if (cb->mem == DMA) {
cb->dma_mr = ib_get_dma_mr(cb->pd, IB_ACCESS_LOCAL_WRITE|
IB_ACCESS_REMOTE_READ|
IB_ACCESS_REMOTE_WRITE);
if (IS_ERR(cb->dma_mr)) {
DEBUG_LOG(cb, "reg_dmamr failed\n");
ret = PTR_ERR(cb->dma_mr);
/* non-DMA modes: register the control message buffers individually */
if (!cb->local_dma_lkey) {
buf.addr = cb->recv_dma_addr;
buf.size = sizeof cb->recv_buf;
DEBUG_LOG(cb, "recv buf dma_addr %llx size %d\n", buf.addr,
iovbase = cb->recv_dma_addr;
cb->recv_mr = ib_reg_phys_mr(cb->pd, &buf, 1,
IB_ACCESS_LOCAL_WRITE,
if (IS_ERR(cb->recv_mr)) {
DEBUG_LOG(cb, "recv_buf reg_mr failed\n");
ret = PTR_ERR(cb->recv_mr);
buf.addr = cb->send_dma_addr;
buf.size = sizeof cb->send_buf;
DEBUG_LOG(cb, "send buf dma_addr %llx size %d\n", buf.addr,
iovbase = cb->send_dma_addr;
cb->send_mr = ib_reg_phys_mr(cb->pd, &buf, 1,
if (IS_ERR(cb->send_mr)) {
DEBUG_LOG(cb, "send_buf reg_mr failed\n");
ret = PTR_ERR(cb->send_mr);
/* rdma_buf is the RDMA WRITE sink / READ staging area */
cb->rdma_buf = kmalloc(cb->size, GFP_KERNEL);
DEBUG_LOG(cb, "rdma_buf malloc failed\n");
cb->rdma_dma_addr = dma_map_single(cb->pd->device->dma_device,
cb->rdma_buf, cb->size,
pci_unmap_addr_set(cb, rdma_mapping, cb->rdma_dma_addr);
if (cb->mem != DMA) {
/* number of pages spanned by cb->size, rounded up */
cb->page_list_len = (((cb->size - 1) & PAGE_MASK) +
PAGE_SIZE) >> PAGE_SHIFT;
cb->page_list = ib_alloc_fast_reg_page_list(
if (IS_ERR(cb->page_list)) {
DEBUG_LOG(cb, "recv_buf reg_mr failed\n");
ret = PTR_ERR(cb->page_list);
cb->fastreg_mr = ib_alloc_fast_reg_mr(cb->pd,
cb->page_list->max_page_list_len);
if (IS_ERR(cb->fastreg_mr)) {
DEBUG_LOG(cb, "recv_buf reg_mr failed\n");
ret = PTR_ERR(cb->fastreg_mr);
DEBUG_LOG(cb, "fastreg rkey 0x%x page_list %p"
" page_list_len %u\n", cb->fastreg_mr->rkey,
cb->page_list, cb->page_list_len);
cb->mw = ib_alloc_mw(cb->pd);
if (IS_ERR(cb->mw)) {
DEBUG_LOG(cb, "recv_buf alloc_mw failed\n");
ret = PTR_ERR(cb->mw);
DEBUG_LOG(cb, "mw rkey 0x%x\n", cb->mw->rkey);
buf.addr = cb->rdma_dma_addr;
iovbase = cb->rdma_dma_addr;
cb->rdma_mr = ib_reg_phys_mr(cb->pd, &buf, 1,
IB_ACCESS_REMOTE_READ|
IB_ACCESS_REMOTE_WRITE,
if (IS_ERR(cb->rdma_mr)) {
DEBUG_LOG(cb, "rdma_buf reg_mr failed\n");
ret = PTR_ERR(cb->rdma_mr);
DEBUG_LOG(cb, "rdma buf dma_addr %llx size %d mr rkey 0x%x\n",
buf.addr, (int)buf.size, cb->rdma_mr->rkey);
/* client and timing-test server also need the RDMA READ source */
if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
cb->start_buf = kmalloc(cb->size, GFP_KERNEL);
if (!cb->start_buf) {
DEBUG_LOG(cb, "start_buf malloc failed\n");
cb->start_dma_addr = dma_map_single(cb->pd->device->dma_device,
cb->start_buf, cb->size,
pci_unmap_addr_set(cb, start_mapping, cb->start_dma_addr);
if (cb->mem == MR || cb->mem == MW) {
unsigned flags = IB_ACCESS_REMOTE_READ;
/* timing tests write into start_buf remotely as well */
if (cb->wlat || cb->rlat || cb->bw)
flags |= IB_ACCESS_REMOTE_WRITE;
buf.addr = cb->start_dma_addr;
DEBUG_LOG(cb, "start buf dma_addr %llx size %d\n",
buf.addr, (int)buf.size);
iovbase = cb->start_dma_addr;
cb->start_mr = ib_reg_phys_mr(cb->pd, &buf, 1,
if (IS_ERR(cb->start_mr)) {
DEBUG_LOG(cb, "start_buf reg_mr failed\n");
ret = PTR_ERR(cb->start_mr);
DEBUG_LOG(cb, "allocated & registered buffers...\n");
/* error unwind: each guard tolerates resources never allocated */
if (cb->fastreg_mr && !IS_ERR(cb->fastreg_mr))
ib_dereg_mr(cb->fastreg_mr);
if (cb->mw && !IS_ERR(cb->mw))
ib_dealloc_mw(cb->mw);
if (cb->rdma_mr && !IS_ERR(cb->rdma_mr))
ib_dereg_mr(cb->rdma_mr);
if (cb->page_list && !IS_ERR(cb->page_list))
ib_free_fast_reg_page_list(cb->page_list);
if (cb->dma_mr && !IS_ERR(cb->dma_mr))
ib_dereg_mr(cb->dma_mr);
if (cb->recv_mr && !IS_ERR(cb->recv_mr))
ib_dereg_mr(cb->recv_mr);
if (cb->send_mr && !IS_ERR(cb->send_mr))
ib_dereg_mr(cb->send_mr);
kfree(cb->start_buf);
/*
 * Tear down everything krping_setup_buffers() created: deregister the
 * MRs / MW that were allocated (NULL checks are elided from this view),
 * unmap the DMA mappings, and free the kmalloc'd buffers.
 */
static void krping_free_buffers(struct krping_cb *cb)
DEBUG_LOG(cb, "krping_free_buffers called on cb %p\n", cb);
ib_dereg_mr(cb->dma_mr);
ib_dereg_mr(cb->send_mr);
ib_dereg_mr(cb->recv_mr);
ib_dereg_mr(cb->rdma_mr);
ib_dereg_mr(cb->start_mr);
ib_dereg_mr(cb->fastreg_mr);
ib_dealloc_mw(cb->mw);
dma_unmap_single(cb->pd->device->dma_device,
pci_unmap_addr(cb, recv_mapping),
sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
dma_unmap_single(cb->pd->device->dma_device,
pci_unmap_addr(cb, send_mapping),
sizeof(cb->send_buf), DMA_BIDIRECTIONAL);
dma_unmap_single(cb->pd->device->dma_device,
pci_unmap_addr(cb, rdma_mapping),
cb->size, DMA_BIDIRECTIONAL);
dma_unmap_single(cb->pd->device->dma_device,
pci_unmap_addr(cb, start_mapping),
cb->size, DMA_BIDIRECTIONAL);
kfree(cb->start_buf);
/*
 * Create the RC queue pair on whichever cm_id owns this connection:
 * the child cm_id on the server, the primary cm_id on the client.
 * SQ depth comes from the txdepth option; a single recv is ever
 * outstanding, hence max_recv_wr = 2 for slack.
 */
static int krping_create_qp(struct krping_cb *cb)
struct ib_qp_init_attr init_attr;
memset(&init_attr, 0, sizeof(init_attr));
init_attr.cap.max_send_wr = cb->txdepth;
init_attr.cap.max_recv_wr = 2;
init_attr.cap.max_recv_sge = 1;
init_attr.cap.max_send_sge = 1;
init_attr.qp_type = IB_QPT_RC;
init_attr.send_cq = cb->cq;
init_attr.recv_cq = cb->cq;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
ret = rdma_create_qp(cb->child_cm_id, cb->pd, &init_attr);
cb->qp = cb->child_cm_id->qp;
ret = rdma_create_qp(cb->cm_id, cb->pd, &init_attr);
cb->qp = cb->cm_id->qp;
/* Destroy QP, CQ, and PD in reverse order of creation. */
static void krping_free_qp(struct krping_cb *cb)
ib_destroy_qp(cb->qp);
ib_destroy_cq(cb->cq);
ib_dealloc_pd(cb->pd);
/*
 * Allocate the PD and CQ for this connection and create the QP.  The
 * CQ is armed for notifications only in the event-driven ping mode;
 * the polling tests (wlat/rlat/bw/frtest) poll the CQ directly.
 * Unwinds the CQ/PD on failure.  NOTE(review): the second error
 * message says "ib_create_cq failed" but follows ib_req_notify_cq —
 * likely a copy/paste in the original.
 */
static int krping_setup_qp(struct krping_cb *cb, struct rdma_cm_id *cm_id)
cb->pd = ib_alloc_pd(cm_id->device);
if (IS_ERR(cb->pd)) {
PRINTF(cb, "ib_alloc_pd failed\n");
return PTR_ERR(cb->pd);
DEBUG_LOG(cb, "created pd %p\n", cb->pd);
strlcpy(cb->stats.name, cb->pd->device->name, sizeof(cb->stats.name));
cb->cq = ib_create_cq(cm_id->device, krping_cq_event_handler, NULL,
cb, cb->txdepth * 2, 0);
if (IS_ERR(cb->cq)) {
PRINTF(cb, "ib_create_cq failed\n");
ret = PTR_ERR(cb->cq);
DEBUG_LOG(cb, "created cq %p\n", cb->cq);
if (!cb->wlat && !cb->rlat && !cb->bw && !cb->frtest) {
ret = ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
PRINTF(cb, "ib_create_cq failed\n");
ret = krping_create_qp(cb);
PRINTF(cb, "krping_create_qp failed: %d\n", ret);
DEBUG_LOG(cb, "created qp %p\n", cb->qp);
ib_destroy_cq(cb->cq);
ib_dealloc_pd(cb->pd);
838 * return the (possibly rebound) rkey for the rdma buffer.
839 * FASTREG mode: invalidate and rebind via fastreg wr.
840 * MW mode: rebind the MW.
841 * other modes: just return the mr rkey.
/*
 * Return the (possibly rebound) rkey for the buffer at 'buf', per the
 * header comment above: FASTREG mode invalidates and re-registers via
 * a LOCAL_INV + FAST_REG_MR chain; MW mode rebinds the window; the
 * plain MR/DMA modes just return the existing MR's rkey.  'post_inv'
 * selects whether the invalidate is posted locally (vs. relying on the
 * server's SEND_WITH_INV).  Switch labels and returns are elided from
 * this view.
 */
static u32 krping_rdma_rkey(struct krping_cb *cb, u64 buf, int post_inv)
u32 rkey = 0xffffffff;
struct ib_send_wr *bad_wr;
cb->invalidate_wr.ex.invalidate_rkey = cb->fastreg_mr->rkey;
 * Update the fastreg key.
ib_update_fast_reg_key(cb->fastreg_mr, ++cb->key);
cb->fastreg_wr.wr.fast_reg.rkey = cb->fastreg_mr->rkey;
 * Update the fastreg WR with new buf info.
if (buf == (u64)cb->start_dma_addr)
cb->fastreg_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_READ;
cb->fastreg_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
cb->fastreg_wr.wr.fast_reg.iova_start = buf;
/* fill the page list with the page-aligned frames backing 'buf' */
p = (u64)(buf & PAGE_MASK);
for (i=0; i < cb->fastreg_wr.wr.fast_reg.page_list_len;
i++, p += PAGE_SIZE) {
cb->page_list->page_list[i] = p;
DEBUG_LOG(cb, "page_list[%d] 0x%llx\n", i, p);
DEBUG_LOG(cb, "post_inv = %d, fastreg new rkey 0x%x shift %u len %u"
" iova_start %llx page_list_len %u\n",
cb->fastreg_wr.wr.fast_reg.rkey,
cb->fastreg_wr.wr.fast_reg.page_shift,
cb->fastreg_wr.wr.fast_reg.length,
cb->fastreg_wr.wr.fast_reg.iova_start,
cb->fastreg_wr.wr.fast_reg.page_list_len);
/* post either the INV+FASTREG chain or the FASTREG alone */
ret = ib_post_send(cb->qp, &cb->invalidate_wr, &bad_wr);
ret = ib_post_send(cb->qp, &cb->fastreg_wr, &bad_wr);
PRINTF(cb, "post send error %d\n", ret);
rkey = cb->fastreg_mr->rkey;
 * Update the MW with new buf info.
if (buf == (u64)cb->start_dma_addr) {
cb->bind_attr.mw_access_flags = IB_ACCESS_REMOTE_READ;
cb->bind_attr.mr = cb->start_mr;
cb->bind_attr.mw_access_flags = IB_ACCESS_REMOTE_WRITE;
cb->bind_attr.mr = cb->rdma_mr;
cb->bind_attr.addr = buf;
DEBUG_LOG(cb, "binding mw rkey 0x%x to buf %llx mr rkey 0x%x\n",
cb->mw->rkey, buf, cb->bind_attr.mr->rkey);
ret = ib_bind_mw(cb->qp, cb->mw, &cb->bind_attr);
PRINTF(cb, "bind mw error %d\n", ret);
/* plain MR mode: pick the MR matching the buffer */
if (buf == (u64)cb->start_dma_addr)
rkey = cb->start_mr->rkey;
rkey = cb->rdma_mr->rkey;
rkey = cb->dma_mr->rkey;
PRINTF(cb, "%s:%d case ERROR\n", __func__, __LINE__);
/*
 * Fill the send buffer with this side's rdma_info advertisement
 * (rkey/addr/len in network byte order) for the buffer at 'buf'.
 * Only the client — or either side in the timing tests — advertises;
 * plain server sends carry no payload.
 */
static void krping_format_send(struct krping_cb *cb, u64 buf)
struct krping_rdma_info *info = &cb->send_buf;
 * Client side will do fastreg or mw bind before
 * advertising the rdma buffer. Server side
 * sends have no data.
if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
rkey = krping_rdma_rkey(cb, buf, !cb->server_invalidate);
info->buf = htonll(buf);
info->rkey = htonl(rkey);
info->size = htonl(cb->size);
DEBUG_LOG(cb, "RDMA addr %llx rkey %x len %d\n",
(unsigned long long)buf, rkey, cb->size);
/*
 * Server side of the ping/pong loop (see the protocol comment near the
 * top of the file): wait for the client's source advertisement, RDMA
 * READ the ping data, ack; wait for the sink advertisement, RDMA WRITE
 * the pong data, ack; repeat.  The enclosing loop and several error
 * branches are elided from this view.
 */
static void krping_test_server(struct krping_cb *cb)
struct ib_send_wr *bad_wr, inv;
/* Wait for client's Start STAG/TO/Len */
wait_event_interruptible(cb->sem, cb->state >= RDMA_READ_ADV);
if (cb->state != RDMA_READ_ADV) {
PRINTF(cb, "wait for RDMA_READ_ADV state %d\n",
DEBUG_LOG(cb, "server received sink adv\n");
/* aim the RDMA WR at the client's advertised source buffer */
cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
cb->rdma_sq_wr.sg_list->length = cb->remote_len;
cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, 1);
cb->rdma_sq_wr.next = NULL;
/* Issue RDMA Read. */
cb->rdma_sq_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
cb->rdma_sq_wr.opcode = IB_WR_RDMA_READ;
if (cb->mem == FASTREG) {
 * Immediately follow the read with a
/* fenced local invalidate chained after the read */
cb->rdma_sq_wr.next = &inv;
memset(&inv, 0, sizeof inv);
inv.opcode = IB_WR_LOCAL_INV;
inv.ex.invalidate_rkey = cb->fastreg_mr->rkey;
inv.send_flags = IB_SEND_FENCE;
ret = ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr);
PRINTF(cb, "post send error %d\n", ret);
/* unlink 'inv', which lives on this stack frame */
cb->rdma_sq_wr.next = NULL;
DEBUG_LOG(cb, "server posted rdma read req \n");
/* Wait for read completion */
wait_event_interruptible(cb->sem,
cb->state >= RDMA_READ_COMPLETE);
if (cb->state != RDMA_READ_COMPLETE) {
"wait for RDMA_READ_COMPLETE state %d\n",
DEBUG_LOG(cb, "server received read complete\n");
/* Display data in recv buf */
PRINTF(cb, "server ping data: %s\n",
/* Tell client to continue */
if (cb->server && cb->server_invalidate) {
cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
DEBUG_LOG(cb, "send-w-inv rkey 0x%x\n", cb->remote_rkey);
ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
PRINTF(cb, "post send error %d\n", ret);
DEBUG_LOG(cb, "server posted go ahead\n");
/* Wait for client's RDMA STAG/TO/Len */
wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
if (cb->state != RDMA_WRITE_ADV) {
"wait for RDMA_WRITE_ADV state %d\n",
DEBUG_LOG(cb, "server received sink adv\n");
/* RDMA Write echo data */
cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
cb->rdma_sq_wr.sg_list->length = strlen(cb->rdma_buf) + 1;
if (cb->local_dma_lkey)
cb->rdma_sgl.lkey = cb->qp->device->local_dma_lkey;
cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, 0);
DEBUG_LOG(cb, "rdma write from lkey %x laddr %llx len %d\n",
cb->rdma_sq_wr.sg_list->lkey,
(unsigned long long)cb->rdma_sq_wr.sg_list->addr,
cb->rdma_sq_wr.sg_list->length);
ret = ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr);
PRINTF(cb, "post send error %d\n", ret);
/* Wait for completion */
ret = wait_event_interruptible(cb->sem, cb->state >=
RDMA_WRITE_COMPLETE);
if (cb->state != RDMA_WRITE_COMPLETE) {
"wait for RDMA_WRITE_COMPLETE state %d\n",
DEBUG_LOG(cb, "server rdma write complete \n");
cb->state = CONNECTED;
/* Tell client to begin again */
if (cb->server && cb->server_invalidate) {
cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
DEBUG_LOG(cb, "send-w-inv rkey 0x%x\n", cb->remote_rkey);
ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
PRINTF(cb, "post send error %d\n", ret);
DEBUG_LOG(cb, "server posted go ahead\n");
/*
 * Read-latency test: issue 'count' back-to-back RDMA READs of the
 * peer's buffer, one at a time, and report total elapsed wall time.
 * Completion is detected either by sleeping on cb->sem (event mode)
 * or by spinning on ib_poll_cq (cb->poll).  Several branch bodies and
 * loop-control lines are elided from this view.
 */
static void rlat_test(struct krping_cb *cb)
int iters = cb->count;
struct timeval start_tv, stop_tv;
struct ib_send_wr *bad_wr;
cb->rdma_sq_wr.opcode = IB_WR_RDMA_READ;
cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
cb->rdma_sq_wr.sg_list->length = cb->size;
microtime(&start_tv);
cb->state = RDMA_READ_ADV;
ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
while (scnt < iters) {
cb->state = RDMA_READ_ADV;
ret = ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr);
"Couldn't post send: ret=%d scnt %d\n",
/* event mode: sleep until the CQ handler advances state */
wait_event_interruptible(cb->sem,
cb->state != RDMA_READ_ADV);
if (cb->state == RDMA_READ_COMPLETE) {
ib_req_notify_cq(cb->cq,
/* poll mode: spin on the CQ directly */
ne = ib_poll_cq(cb->cq, 1, &wc);
if (cb->state == ERROR) {
"state == ERROR...bailing scnt %d\n",
PRINTF(cb, "poll CQ failed %d\n", ne);
if (cb->poll && wc.status != IB_WC_SUCCESS) {
PRINTF(cb, "Completion wth error at %s:\n",
cb->server ? "server" : "client");
PRINTF(cb, "Failed status %d: wr_id %d\n",
wc.status, (int) wc.wr_id);
microtime(&stop_tv);
/* normalize the timeval subtraction (borrow from seconds) */
if (stop_tv.tv_usec < start_tv.tv_usec) {
stop_tv.tv_usec += 1000000;
stop_tv.tv_sec -= 1;
PRINTF(cb, "delta sec %lu delta usec %lu iter %d size %d\n",
stop_tv.tv_sec - start_tv.tv_sec,
stop_tv.tv_usec - start_tv.tv_usec,
/*
 * Write-latency test: ping-pong via RDMA WRITE.  Each side spins on a
 * polled byte in its receive buffer (poll_buf) until the peer's write
 * lands, then writes the next sequence byte back, timing the post and
 * poll phases in CPU cycles for the first cycle_iters iterations.
 * NOTE(review): scnt/ccnt/rcnt initializers and the kmalloc error
 * cleanup paths are elided from this view.
 */
static void wlat_test(struct krping_cb *cb)
int ccnt, scnt, rcnt;
int iters=cb->count;
volatile char *poll_buf = (char *) cb->start_buf;
char *buf = (char *)cb->rdma_buf;
struct timeval start_tv, stop_tv;
cycles_t *post_cycles_start, *post_cycles_stop;
cycles_t *poll_cycles_start, *poll_cycles_stop;
cycles_t *last_poll_cycles_start;
cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
int cycle_iters = 1000;
post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
if (!post_cycles_start) {
PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
if (!post_cycles_stop) {
PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
if (!poll_cycles_start) {
PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
if (!poll_cycles_stop) {
PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
if (!last_poll_cycles_start) {
PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
cb->rdma_sq_wr.sg_list->length = cb->size;
if (cycle_iters > iters)
cycle_iters = iters;
microtime(&start_tv);
while (scnt < iters || ccnt < iters || rcnt < iters) {
/* Wait till buffer changes. */
if (rcnt < iters && !(scnt < 1 && !cb->server)) {
while (*poll_buf != (char)rcnt) {
if (cb->state == ERROR) {
"state = ERROR, bailing\n");
struct ib_send_wr *bad_wr;
/* advertise our progress: next sequence byte */
*buf = (char)scnt+1;
if (scnt < cycle_iters)
post_cycles_start[scnt] = get_cycles();
if (ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr)) {
"Couldn't post send: scnt=%d\n",
if (scnt < cycle_iters)
post_cycles_stop[scnt] = get_cycles();
if (ccnt < cycle_iters)
poll_cycles_start[ccnt] = get_cycles();
if (ccnt < cycle_iters)
last_poll_cycles_start[ccnt] =
ne = ib_poll_cq(cb->cq, 1, &wc);
if (ccnt < cycle_iters)
poll_cycles_stop[ccnt] = get_cycles();
PRINTF(cb, "poll CQ failed %d\n", ne);
if (wc.status != IB_WC_SUCCESS) {
"Completion wth error at %s:\n",
cb->server ? "server" : "client");
"Failed status %d: wr_id %d\n",
wc.status, (int) wc.wr_id);
"scnt=%d, rcnt=%d, ccnt=%d\n",
microtime(&stop_tv);
/* normalize the timeval subtraction (borrow from seconds) */
if (stop_tv.tv_usec < start_tv.tv_usec) {
stop_tv.tv_usec += 1000000;
stop_tv.tv_sec -= 1;
for (i=0; i < cycle_iters; i++) {
sum_post += post_cycles_stop[i] - post_cycles_start[i];
sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
"delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
" sum_post %llu sum_poll %llu sum_last_poll %llu\n",
stop_tv.tv_sec - start_tv.tv_sec,
stop_tv.tv_usec - start_tv.tv_usec,
scnt, cb->size, cycle_iters,
(unsigned long long)sum_post, (unsigned long long)sum_poll,
(unsigned long long)sum_last_poll);
kfree(post_cycles_start);
kfree(post_cycles_stop);
kfree(poll_cycles_start);
kfree(poll_cycles_stop);
kfree(last_poll_cycles_start);
/*
 * Bandwidth test: keep up to txdepth RDMA WRITEs in flight (scnt posts
 * ahead of ccnt completions), timing post and poll phases in CPU
 * cycles for the first cycle_iters iterations, then report totals.
 * Structure mirrors wlat_test() minus the remote-progress polling.
 * NOTE(review): scnt/ccnt initializers and kmalloc error cleanup paths
 * are elided from this view; rcnt appears unused here — confirm.
 */
static void bw_test(struct krping_cb *cb)
int ccnt, scnt, rcnt;
int iters=cb->count;
struct timeval start_tv, stop_tv;
cycles_t *post_cycles_start, *post_cycles_stop;
cycles_t *poll_cycles_start, *poll_cycles_stop;
cycles_t *last_poll_cycles_start;
cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
int cycle_iters = 1000;
post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
if (!post_cycles_start) {
PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
if (!post_cycles_stop) {
PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
if (!poll_cycles_start) {
PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
if (!poll_cycles_stop) {
PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
if (!last_poll_cycles_start) {
PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
cb->rdma_sq_wr.sg_list->length = cb->size;
if (cycle_iters > iters)
cycle_iters = iters;
microtime(&start_tv);
while (scnt < iters || ccnt < iters) {
/* pipeline: post until txdepth WRs are outstanding */
while (scnt < iters && scnt - ccnt < cb->txdepth) {
struct ib_send_wr *bad_wr;
if (scnt < cycle_iters)
post_cycles_start[scnt] = get_cycles();
if (ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr)) {
"Couldn't post send: scnt=%d\n",
if (scnt < cycle_iters)
post_cycles_stop[scnt] = get_cycles();
if (ccnt < cycle_iters)
poll_cycles_start[ccnt] = get_cycles();
if (ccnt < cycle_iters)
last_poll_cycles_start[ccnt] =
ne = ib_poll_cq(cb->cq, 1, &wc);
if (ccnt < cycle_iters)
poll_cycles_stop[ccnt] = get_cycles();
PRINTF(cb, "poll CQ failed %d\n", ne);
if (wc.status != IB_WC_SUCCESS) {
"Completion wth error at %s:\n",
cb->server ? "server" : "client");
"Failed status %d: wr_id %d\n",
wc.status, (int) wc.wr_id);
microtime(&stop_tv);
/* normalize the timeval subtraction (borrow from seconds) */
if (stop_tv.tv_usec < start_tv.tv_usec) {
stop_tv.tv_usec += 1000000;
stop_tv.tv_sec -= 1;
for (i=0; i < cycle_iters; i++) {
sum_post += post_cycles_stop[i] - post_cycles_start[i];
sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
"delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
" sum_post %llu sum_poll %llu sum_last_poll %llu\n",
stop_tv.tv_sec - start_tv.tv_sec,
stop_tv.tv_usec - start_tv.tv_usec,
scnt, cb->size, cycle_iters,
(unsigned long long)sum_post, (unsigned long long)sum_poll,
(unsigned long long)sum_last_poll);
kfree(post_cycles_start);
kfree(post_cycles_stop);
kfree(poll_cycles_start);
kfree(poll_cycles_stop);
kfree(last_poll_cycles_start);
/*
 * krping_rlat_test_server - server side of the read-latency test.
 *
 * Waits (by spinning on the CQ handler) for the client's start
 * STAG/TO/Len advertisement, sends back this side's buffer info, spins
 * for the send completion, then blocks until the connection errors out
 * (the client drives the actual RDMA READ traffic).
 */
1440 static void krping_rlat_test_server(struct krping_cb *cb)
1442 struct ib_send_wr *bad_wr;
1446 /* Spin waiting for client's Start STAG/TO/Len */
1447 while (cb->state < RDMA_READ_ADV) {
1448 krping_cq_event_handler(cb->cq, cb);
1451 /* Send STAG/TO/Len to client */
1452 krping_format_send(cb, cb->start_dma_addr);
1453 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1455 PRINTF(cb, "post send error %d\n", ret);
1459 /* Spin waiting for send completion */
/* NOTE(review): precedence bug — this assigns ret = (ib_poll_cq(...) == 0),
 * so ret is 0/1, not the poll return code; the "poll error" report below
 * never sees the real error value.  Intended: ((ret = ib_poll_cq(...)) == 0). */
1460 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1462 PRINTF(cb, "poll error %d\n", ret);
/* NOTE(review): "completiong" typo in the message below ("completion"). */
1466 PRINTF(cb, "send completiong error %d\n", wc.status);
/* Park until the client disconnects and the CM handler sets ERROR. */
1470 wait_event_interruptible(cb->sem, cb->state == ERROR);
/*
 * krping_wlat_test_server - server side of the write-latency test.
 *
 * Identical handshake to krping_rlat_test_server(): wait for the
 * client's STAG/TO/Len, advertise ours, spin for the send completion,
 * then sleep until the session errors out.  (Lines missing from this
 * listing — presumably the wlat polling loop — sit between 1499 and 1504.)
 */
1473 static void krping_wlat_test_server(struct krping_cb *cb)
1475 struct ib_send_wr *bad_wr;
1479 /* Spin waiting for client's Start STAG/TO/Len */
1480 while (cb->state < RDMA_READ_ADV) {
1481 krping_cq_event_handler(cb->cq, cb);
1484 /* Send STAG/TO/Len to client */
1485 krping_format_send(cb, cb->start_dma_addr);
1486 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1488 PRINTF(cb, "post send error %d\n", ret);
1492 /* Spin waiting for send completion */
/* NOTE(review): same ret = (poll == 0) precedence bug as the rlat server. */
1493 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1495 PRINTF(cb, "poll error %d\n", ret);
/* NOTE(review): "completiong" typo ("completion"). */
1499 PRINTF(cb, "send completiong error %d\n", wc.status);
1504 wait_event_interruptible(cb->sem, cb->state == ERROR);
/*
 * krping_bw_test_server - server side of the bandwidth test.
 *
 * Same advertisement handshake as the latency servers; the client then
 * performs the RDMA WRITE stream (bw_test) while this side sleeps until
 * the connection is torn down.
 */
1507 static void krping_bw_test_server(struct krping_cb *cb)
1509 struct ib_send_wr *bad_wr;
1513 /* Spin waiting for client's Start STAG/TO/Len */
1514 while (cb->state < RDMA_READ_ADV) {
1515 krping_cq_event_handler(cb->cq, cb);
1518 /* Send STAG/TO/Len to client */
1519 krping_format_send(cb, cb->start_dma_addr);
1520 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1522 PRINTF(cb, "post send error %d\n", ret);
1526 /* Spin waiting for send completion */
/* NOTE(review): same ret = (poll == 0) precedence bug as the rlat server. */
1527 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1529 PRINTF(cb, "poll error %d\n", ret);
/* NOTE(review): "completiong" typo ("completion"). */
1533 PRINTF(cb, "send completiong error %d\n", wc.status);
1539 wait_event_interruptible(cb->sem, cb->state == ERROR);
/*
 * fastreg_supported - probe the accepted connection's device for
 * fast-register MR support.
 *
 * Queries the child cm_id's ib_device and tests
 * IB_DEVICE_MEM_MGT_EXTENSIONS in device_cap_flags.  Returns nonzero if
 * supported, 0 otherwise (return statements not visible in this listing).
 */
1542 static int fastreg_supported(struct krping_cb *cb)
1544 struct ib_device *dev = cb->child_cm_id->device;
1545 struct ib_device_attr attr;
1548 ret = ib_query_device(dev, &attr);
1550 PRINTF(cb, "ib_query_device failed ret %d\n", ret);
1553 if (!(attr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
1554 PRINTF(cb, "Fastreg not supported - device_cap_flags 0x%x\n",
1555 attr.device_cap_flags);
1558 DEBUG_LOG(cb, "Fastreg supported - device_cap_flags 0x%x\n",
1559 attr.device_cap_flags);
/*
 * krping_bind_server - bind and listen on the configured IPv4
 * address/port, then wait for an incoming connect request.
 *
 * Builds a sockaddr_in from cb->addr/cb->port (cb->port is already in
 * network byte order — see htons() in krping_doit), binds the cm_id,
 * listens with a backlog of 3, and sleeps until the CM event handler
 * advances cb->state to CONNECT_REQUEST.  Also rejects FASTREG mode on
 * devices without fastreg support.  Returns 0 on success (returns not
 * visible in this listing).
 */
1563 static int krping_bind_server(struct krping_cb *cb)
1565 struct sockaddr_in sin;
1568 memset(&sin, 0, sizeof(sin));
1569 sin.sin_len = sizeof sin;
1570 sin.sin_family = AF_INET;
1571 sin.sin_addr.s_addr = cb->addr.s_addr;
1572 sin.sin_port = cb->port;
1574 ret = rdma_bind_addr(cb->cm_id, (struct sockaddr *) &sin);
1576 PRINTF(cb, "rdma_bind_addr error %d\n", ret);
1579 DEBUG_LOG(cb, "rdma_bind_addr successful\n");
1581 DEBUG_LOG(cb, "rdma_listen\n");
1582 ret = rdma_listen(cb->cm_id, 3);
1584 PRINTF(cb, "rdma_listen failed: %d\n", ret);
/* Block until the CM callback reports a connect request (or an error
 * state beyond it). */
1588 wait_event_interruptible(cb->sem, cb->state >= CONNECT_REQUEST);
1589 if (cb->state != CONNECT_REQUEST) {
1590 PRINTF(cb, "wait for CONNECT_REQUEST state %d\n",
/* FASTREG mem mode requires hardware support on the accepted device. */
1595 if (cb->mem == FASTREG && !fastreg_supported(cb))
/*
 * krping_run_server - top-level server flow.
 *
 * bind/listen -> set up QP on the child cm_id -> allocate and register
 * buffers -> pre-post the first receive -> accept the connection ->
 * dispatch to the selected test (wlat/rlat/bw/ping) -> disconnect and
 * tear everything down.  Error paths (gotos to cleanup labels) are not
 * visible in this listing.
 */
1601 static void krping_run_server(struct krping_cb *cb)
1603 struct ib_recv_wr *bad_wr;
1606 ret = krping_bind_server(cb);
1610 ret = krping_setup_qp(cb, cb->child_cm_id);
1612 PRINTF(cb, "setup_qp failed: %d\n", ret);
1616 ret = krping_setup_buffers(cb);
1618 PRINTF(cb, "krping_setup_buffers failed: %d\n", ret);
/* A receive must be posted before accept so the client's first send
 * cannot arrive with no receive queued. */
1622 ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
1624 PRINTF(cb, "ib_post_recv failed: %d\n", ret);
1628 ret = krping_accept(cb);
1630 PRINTF(cb, "connect error %d\n", ret);
/* Test dispatch: exactly one of wlat/rlat/bw, else the default ping. */
1635 krping_wlat_test_server(cb);
1637 krping_rlat_test_server(cb);
1639 krping_bw_test_server(cb);
1641 krping_test_server(cb);
1642 rdma_disconnect(cb->child_cm_id);
1644 krping_free_buffers(cb);
1648 rdma_destroy_id(cb->child_cm_id);
/*
 * krping_test_client - the default rdma-ping loop, client side.
 *
 * For each ping (forever if cb->count == 0): fill the start buffer with
 * "rdma-ping-%d: " plus a rolling ascii pattern, advertise it so the
 * server RDMA READs it, then advertise the rdma buffer so the server
 * RDMA WRITEs the data back, and (when validation is enabled) memcmp
 * the echo.  State transitions are driven by the CQ/CM handlers via
 * cb->sem.
 */
1651 static void krping_test_client(struct krping_cb *cb)
1653 int ping, start, cc, i, ret;
1654 struct ib_send_wr *bad_wr;
1658 for (ping = 0; !cb->count || ping < cb->count; ping++) {
1659 cb->state = RDMA_READ_ADV;
1661 /* Put some ascii text in the buffer. */
1662 cc = sprintf(cb->start_buf, "rdma-ping-%d: ", ping);
/* NOTE(review): 'c' is assigned from 'start' here but neither its
 * declaration nor start's initialization/increment is visible in this
 * listing — confirm against the full source. */
1663 for (i = cc, c = start; i < cb->size; i++) {
1664 cb->start_buf[i] = c;
1672 cb->start_buf[cb->size - 1] = 0;
/* Advertise the source buffer; the server will RDMA READ it. */
1674 krping_format_send(cb, cb->start_dma_addr);
1675 if (cb->state == ERROR) {
1676 PRINTF(cb, "krping_format_send failed\n");
1679 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1681 PRINTF(cb, "post send error %d\n", ret);
1685 /* Wait for server to ACK */
1686 wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
1687 if (cb->state != RDMA_WRITE_ADV) {
1689 "wait for RDMA_WRITE_ADV state %d\n",
/* Now advertise the sink buffer; the server RDMA WRITEs the echo. */
1694 krping_format_send(cb, cb->rdma_dma_addr);
1695 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1697 PRINTF(cb, "post send error %d\n", ret);
1701 /* Wait for the server to say the RDMA Write is complete. */
1702 wait_event_interruptible(cb->sem,
1703 cb->state >= RDMA_WRITE_COMPLETE);
1704 if (cb->state != RDMA_WRITE_COMPLETE) {
1706 "wait for RDMA_WRITE_COMPLETE state %d\n",
/* Optional data validation of the echoed buffer. */
1712 if (memcmp(cb->start_buf, cb->rdma_buf, cb->size)) {
1713 PRINTF(cb, "data mismatch!\n");
1718 PRINTF(cb, "ping data: %s\n", cb->rdma_buf);
/* Linger up to 1s so a disconnect-driven ERROR can end the loop early. */
1720 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
/*
 * krping_rlat_test_client - client side of the read-latency test.
 *
 * Exchanges STAG/TO/Len advertisements with the server, then (in the
 * visible portion) runs a 0-byte RDMA WRITE latency micro-benchmark:
 * 100000 zero-length writes, each posted and polled to completion, with
 * the elapsed time reported in microseconds.  Several lines (loop
 * braces, the second microtime() call, error exits) are missing from
 * this listing.
 */
1725 static void krping_rlat_test_client(struct krping_cb *cb)
1727 struct ib_send_wr *bad_wr;
1731 cb->state = RDMA_READ_ADV;
1733 /* Send STAG/TO/Len to client */
1734 krping_format_send(cb, cb->start_dma_addr);
1735 if (cb->state == ERROR) {
1736 PRINTF(cb, "krping_format_send failed\n");
1739 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1741 PRINTF(cb, "post send error %d\n", ret);
1745 /* Spin waiting for send completion */
/* NOTE(review): precedence bug — ret = (ib_poll_cq(...) == 0), so the
 * error check below reports 0/1 rather than the poll return code. */
1746 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1748 PRINTF(cb, "poll error %d\n", ret);
1752 PRINTF(cb, "send completion error %d\n", wc.status);
1756 /* Spin waiting for server's Start STAG/TO/Len */
1757 while (cb->state < RDMA_WRITE_ADV) {
1758 krping_cq_event_handler(cb->cq, cb);
/* --- 0-byte write latency micro-benchmark (visible fragment) --- */
1764 struct timeval start, stop;
1767 unsigned long long elapsed;
1769 struct ib_send_wr *bad_wr;
/* Zero-length signaled RDMA WRITE: measures pure post+completion cost. */
1772 cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
1773 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
1774 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
1775 cb->rdma_sq_wr.sg_list->length = 0;
1776 cb->rdma_sq_wr.num_sge = 0;
1779 for (i=0; i < 100000; i++) {
1780 if (ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr)) {
1781 PRINTF(cb, "Couldn't post send\n");
1785 ne = ib_poll_cq(cb->cq, 1, &wc);
1788 PRINTF(cb, "poll CQ failed %d\n", ne);
1791 if (wc.status != IB_WC_SUCCESS) {
/* NOTE(review): "wth" typo ("with"). */
1792 PRINTF(cb, "Completion wth error at %s:\n",
1793 cb->server ? "server" : "client");
1794 PRINTF(cb, "Failed status %d: wr_id %d\n",
1795 wc.status, (int) wc.wr_id);
/* Normalize timeval subtraction (borrow a second). */
1801 if (stop.tv_usec < start.tv_usec) {
1802 stop.tv_usec += 1000000;
1805 sec = stop.tv_sec - start.tv_sec;
1806 usec = stop.tv_usec - start.tv_usec;
1807 elapsed = sec * 1000000 + usec;
1808 PRINTF(cb, "0B-write-lat iters 100000 usec %llu\n", elapsed);
/*
 * krping_wlat_test_client - client side of the write-latency test.
 *
 * Advertise our STAG/TO/Len, spin for the send completion, then spin
 * until the server's advertisement arrives.  The actual wlat traffic
 * (missing from this listing, after line 1848) follows.
 */
1815 static void krping_wlat_test_client(struct krping_cb *cb)
1817 struct ib_send_wr *bad_wr;
1821 cb->state = RDMA_READ_ADV;
1823 /* Send STAG/TO/Len to client */
1824 krping_format_send(cb, cb->start_dma_addr);
1825 if (cb->state == ERROR) {
1826 PRINTF(cb, "krping_format_send failed\n");
1829 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1831 PRINTF(cb, "post send error %d\n", ret);
1835 /* Spin waiting for send completion */
/* NOTE(review): same ret = (poll == 0) precedence bug as elsewhere. */
1836 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1838 PRINTF(cb, "poll error %d\n", ret);
1842 PRINTF(cb, "send completion error %d\n", wc.status);
1846 /* Spin waiting for server's Start STAG/TO/Len */
1847 while (cb->state < RDMA_WRITE_ADV) {
1848 krping_cq_event_handler(cb->cq, cb);
/*
 * krping_bw_test_client - client side of the bandwidth test.
 *
 * Same advertisement handshake as the latency clients; once the
 * server's STAG/TO/Len arrives, bw_test() (call not visible here) runs
 * the RDMA WRITE stream.
 */
1854 static void krping_bw_test_client(struct krping_cb *cb)
1856 struct ib_send_wr *bad_wr;
1860 cb->state = RDMA_READ_ADV;
1862 /* Send STAG/TO/Len to client */
1863 krping_format_send(cb, cb->start_dma_addr);
1864 if (cb->state == ERROR) {
1865 PRINTF(cb, "krping_format_send failed\n");
1868 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1870 PRINTF(cb, "post send error %d\n", ret);
1874 /* Spin waiting for send completion */
/* NOTE(review): same ret = (poll == 0) precedence bug as elsewhere. */
1875 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1877 PRINTF(cb, "poll error %d\n", ret);
1881 PRINTF(cb, "send completion error %d\n", wc.status);
1885 /* Spin waiting for server's Start STAG/TO/Len */
1886 while (cb->state < RDMA_WRITE_ADV) {
1887 krping_cq_event_handler(cb->cq, cb);
/*
 * krping_fr_test - stress test for fast-register / local-invalidate WRs.
 *
 * Allocates a fast-reg page list and MR sized for cb->size, then loops
 * posting IB_WR_FAST_REG_MR (with a randomized length each pass) and
 * IB_WR_LOCAL_INV work requests up to half the tx depth, polling the CQ
 * for completions.  Roughly every 9 seconds it pauses for a second, and
 * it exits on signal or when the session state goes to ERROR, draining
 * the CQ before freeing resources.  Many lines (loop headers, error
 * exits, counters) are missing from this listing.
 */
1893 static void krping_fr_test(struct krping_cb *cb)
1895 struct ib_fast_reg_page_list *pl;
1896 struct ib_send_wr fr, inv, *bad;
1902 int size = cb->size;
/* Number of pages needed to cover 'size' bytes, rounded up. */
1903 int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
1908 pl = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
1910 PRINTF(cb, "ib_alloc_fast_reg_page_list failed %ld\n", PTR_ERR(pl));
1914 mr = ib_alloc_fast_reg_mr(cb->pd, plen);
/* NOTE(review): this error path prints PTR_ERR(pl) but it is the MR
 * allocation that failed — should presumably be PTR_ERR(mr). */
1916 PRINTF(cb, "ib_alloc_fast_reg_mr failed %ld\n", PTR_ERR(pl));
/* Fill the page list with dummy (non-DMA) addresses; this test only
 * exercises the WR machinery, not real memory access. */
1920 for (i=0; i<plen; i++)
1921 pl->page_list[i] = 0xcafebabe | i;
1923 memset(&fr, 0, sizeof fr);
1924 fr.opcode = IB_WR_FAST_REG_MR;
1925 fr.wr.fast_reg.page_shift = PAGE_SHIFT;
1926 fr.wr.fast_reg.length = size;
1927 fr.wr.fast_reg.page_list = pl;
1928 fr.wr.fast_reg.page_list_len = plen;
1929 fr.wr.fast_reg.iova_start = 0;
1930 fr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
/* The paired local-invalidate WR; signaled so completions are counted. */
1932 memset(&inv, 0, sizeof inv);
1933 inv.opcode = IB_WR_LOCAL_INV;
1934 inv.send_flags = IB_SEND_SIGNALED;
1936 DEBUG_LOG(cb, "fr_test: stag index 0x%x plen %u size %u depth %u\n", mr->rkey >> 8, plen, cb->size, cb->txdepth);
1937 start = time_uptime;
/* Periodic breather: after ~9s of activity, pause one second (or stop
 * if the session has errored out). */
1939 if ((time_uptime - start) >= 9) {
1940 DEBUG_LOG(cb, "fr_test: pausing 1 second! count %u latest size %u plen %u\n", count, size, plen);
1941 wait_event_interruptible(cb->sem, cb->state == ERROR);
1942 if (cb->state == ERROR)
1944 start = time_uptime;
/* Keep up to txdepth/2 fastreg+inv pairs outstanding. */
1946 while (scnt < (cb->txdepth>>1)) {
1947 ib_update_fast_reg_key(mr, ++key);
1948 fr.wr.fast_reg.rkey = mr->rkey;
1949 inv.ex.invalidate_rkey = mr->rkey;
/* Randomize the registration length each iteration. */
1950 size = arc4random() % cb->size;
1953 plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
1954 fr.wr.fast_reg.length = size;
1955 fr.wr.fast_reg.page_list_len = plen;
1956 ret = ib_post_send(cb->qp, &fr, &bad);
1958 PRINTF(cb, "ib_post_send failed %d\n", ret);
1965 ret = ib_poll_cq(cb->cq, 1, &wc);
1967 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
1972 PRINTF(cb, "completion error %u\n", wc.status);
1978 else if (krping_sigpending()) {
1979 PRINTF(cb, "signal!\n");
1986 DEBUG_LOG(cb, "sleeping 1 second\n");
1987 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
/* Reap any completions still pending before teardown. */
1989 DEBUG_LOG(cb, "draining the cq...\n");
1991 ret = ib_poll_cq(cb->cq, 1, &wc);
1993 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
1998 PRINTF(cb, "completion error %u opcode %u\n", wc.status, wc.opcode);
2002 DEBUG_LOG(cb, "fr_test: done!\n");
2005 ib_free_fast_reg_page_list(pl);
/*
 * krping_connect_client - issue rdma_connect() and wait for the CM
 * handler to signal CONNECTED.
 *
 * Uses 1 responder resource / 1 initiator depth and up to 10 retries.
 * Returns 0 on success (return statements not visible in this listing).
 */
2008 static int krping_connect_client(struct krping_cb *cb)
2010 struct rdma_conn_param conn_param;
2013 memset(&conn_param, 0, sizeof conn_param);
2014 conn_param.responder_resources = 1;
2015 conn_param.initiator_depth = 1;
2016 conn_param.retry_count = 10;
2018 ret = rdma_connect(cb->cm_id, &conn_param);
2020 PRINTF(cb, "rdma_connect error %d\n", ret);
/* Sleep until the CM event handler advances past CONNECTED (or ERROR). */
2024 wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
2025 if (cb->state == ERROR) {
2026 PRINTF(cb, "wait for CONNECTED state %d\n", cb->state);
2030 DEBUG_LOG(cb, "rdma_connect successful\n");
/*
 * krping_bind_client - resolve the server address and route.
 *
 * Builds a sockaddr_in from cb->addr/cb->port (port already in network
 * byte order), calls rdma_resolve_addr() and waits for the CM handler
 * to reach ROUTE_RESOLVED.  Also verifies fastreg support when
 * mem_mode is FASTREG.  Returns 0 on success (returns not visible in
 * this listing).
 */
2034 static int krping_bind_client(struct krping_cb *cb)
2036 struct sockaddr_in sin;
2039 memset(&sin, 0, sizeof(sin));
2040 sin.sin_len = sizeof sin;
2041 sin.sin_family = AF_INET;
2042 sin.sin_addr.s_addr = cb->addr.s_addr;
2043 sin.sin_port = cb->port;
2045 ret = rdma_resolve_addr(cb->cm_id, NULL, (struct sockaddr *) &sin,
2048 PRINTF(cb, "rdma_resolve_addr error %d\n", ret);
2052 wait_event_interruptible(cb->sem, cb->state >= ROUTE_RESOLVED);
2053 if (cb->state != ROUTE_RESOLVED) {
2055 "addr/route resolution did not resolve: state %d\n",
2060 if (cb->mem == FASTREG && !fastreg_supported(cb))
2063 DEBUG_LOG(cb, "rdma_resolve_addr - rdma_resolve_route successful\n");
/*
 * krping_run_client - top-level client flow.
 *
 * Mirrors krping_run_server(): resolve -> setup QP -> setup buffers ->
 * pre-post a receive -> connect -> dispatch to the selected test
 * (wlat/rlat/bw/frtest/ping) -> disconnect and free buffers.  Error
 * paths (gotos to cleanup labels) are not visible in this listing.
 */
2067 static void krping_run_client(struct krping_cb *cb)
2069 struct ib_recv_wr *bad_wr;
2072 ret = krping_bind_client(cb);
2076 ret = krping_setup_qp(cb, cb->cm_id);
2078 PRINTF(cb, "setup_qp failed: %d\n", ret);
2082 ret = krping_setup_buffers(cb);
2084 PRINTF(cb, "krping_setup_buffers failed: %d\n", ret);
/* Post a receive before connecting so the server's first send lands. */
2088 ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
2090 PRINTF(cb, "ib_post_recv failed: %d\n", ret);
2094 ret = krping_connect_client(cb);
2096 PRINTF(cb, "connect error %d\n", ret);
/* Test dispatch: one of wlat/rlat/bw/frtest, else the default ping. */
2101 krping_wlat_test_client(cb);
2103 krping_rlat_test_client(cb);
2105 krping_bw_test_client(cb);
2106 else if (cb->frtest)
2109 krping_test_client(cb);
2110 rdma_disconnect(cb->cm_id);
2112 krping_free_buffers(cb);
/*
 * krping_doit - module entry point: parse a command string and run one
 * krping session.
 *
 * Allocates a control block, links it onto the global krping_cbs list
 * (under krping_mutex), parses "opt=value" pairs from 'cmd' via
 * krping_getopt(), validates the resulting option combination, creates
 * the RDMA CM id, and runs either the server or client flow.  'cookie'
 * is the printf context stored in cb->cookie for PRINTF/DEBUG_LOG.
 * Many lines (option-name cases, error gotos, final return) are missing
 * from this listing.
 */
2117 int krping_doit(char *cmd, void *cookie)
2119 struct krping_cb *cb;
2123 unsigned long optint;
2125 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
/* Register the cb globally so krping_walk_cb_list() can report stats. */
2129 mutex_lock(&krping_mutex);
2130 list_add_tail(&cb->list, &krping_cbs);
2131 mutex_unlock(&krping_mutex);
2133 cb->cookie = cookie;
2137 cb->txdepth = RPING_SQ_DEPTH;
2139 init_waitqueue_head(&cb->sem);
/* Option parsing loop; each case below corresponds to one option token
 * (case labels not visible in this listing). */
2141 while ((op = krping_getopt("krping", &cmd, krping_opts, NULL, &optarg,
2145 cb->addr_str = optarg;
2146 DEBUG_LOG(cb, "ipaddr (%s)\n", optarg);
2147 if (!inet_aton(optarg, &cb->addr)) {
2148 PRINTF(cb, "bad addr string %s\n",
/* Port is stored in network byte order; used as-is by bind/resolve. */
2154 cb->port = htons(optint);
2155 DEBUG_LOG(cb, "port %d\n", (int)optint);
2159 DEBUG_LOG(cb, "server\n");
/* NOTE(review): this second "server" debug message appears to belong to
 * a different option case — possibly a copy/paste slip; confirm against
 * the full option table. */
2163 DEBUG_LOG(cb, "server\n");
2167 DEBUG_LOG(cb, "client\n");
2171 if ((cb->size < 1) ||
2172 (cb->size > RPING_BUFSIZE)) {
2173 PRINTF(cb, "Invalid size %d "
2174 "(valid range is 1 to %d)\n",
2175 cb->size, RPING_BUFSIZE);
2178 DEBUG_LOG(cb, "size %d\n", (int)optint);
2182 if (cb->count < 0) {
2183 PRINTF(cb, "Invalid count %d\n",
2187 DEBUG_LOG(cb, "count %d\n", (int) cb->count);
2191 DEBUG_LOG(cb, "verbose\n");
2195 DEBUG_LOG(cb, "validate data\n");
/* mem_mode selection: dma | fastreg | mw | mr. */
2210 if (!strncmp(optarg, "dma", 3))
2212 else if (!strncmp(optarg, "fastreg", 7))
2214 else if (!strncmp(optarg, "mw", 2))
2216 else if (!strncmp(optarg, "mr", 2))
2219 PRINTF(cb, "unknown mem mode %s. "
2220 "Must be dma, fastreg, mw, or mr\n",
2227 cb->server_invalidate = 1;
2230 cb->txdepth = optint;
2231 DEBUG_LOG(cb, "txdepth %d\n", (int) cb->txdepth);
2234 cb->local_dma_lkey = 1;
2235 DEBUG_LOG(cb, "using local dma lkey\n");
2239 DEBUG_LOG(cb, "using read-with-inv\n");
2243 DEBUG_LOG(cb, "fast-reg test!\n");
2246 PRINTF(cb, "unknown opt %s\n", optarg);
/* --- Option combination validation --- */
2254 if (cb->server == -1) {
2255 PRINTF(cb, "must be either client or server\n");
2260 if (cb->server && cb->frtest) {
2261 PRINTF(cb, "must be client to run frtest\n");
2266 if ((cb->frtest + cb->bw + cb->rlat + cb->wlat) > 1) {
2267 PRINTF(cb, "Pick only one test: fr, bw, rlat, wlat\n");
2272 if (cb->server_invalidate && cb->mem != FASTREG) {
2273 PRINTF(cb, "server_invalidate only valid with fastreg mem_mode\n");
2278 if (cb->read_inv && cb->mem != FASTREG) {
2279 PRINTF(cb, "read_inv only valid with fastreg mem_mode\n");
2284 if (cb->mem != MR && (cb->wlat || cb->rlat || cb->bw)) {
2285 PRINTF(cb, "wlat, rlat, and bw tests only support mem_mode MR\n");
/* --- CM id creation and session run --- */
2290 cb->cm_id = rdma_create_id(krping_cma_event_handler, cb, RDMA_PS_TCP);
2291 if (IS_ERR(cb->cm_id)) {
2292 ret = PTR_ERR(cb->cm_id);
2293 PRINTF(cb, "rdma_create_id error %d\n", ret);
2296 DEBUG_LOG(cb, "created cm_id %p\n", cb->cm_id);
2299 krping_run_server(cb);
2301 krping_run_client(cb);
2303 DEBUG_LOG(cb, "destroy cm_id %p\n", cb->cm_id);
2304 rdma_destroy_id(cb->cm_id);
/* Unregister from the global list before freeing (kfree not visible). */
2306 mutex_lock(&krping_mutex);
2307 list_del(&cb->list);
2308 mutex_unlock(&krping_mutex);
/*
 * krping_walk_cb_list - invoke 'f' on every registered session's stats.
 *
 * Holds krping_mutex across the walk so sessions cannot be added or
 * removed mid-iteration.  Sessions whose PD is not yet allocated are
 * reported with a NULL stats pointer.  (The return type on the line
 * preceding 2314 is not visible in this listing.)
 */
2314 krping_walk_cb_list(void (*f)(struct krping_stats *, void *), void *arg)
2316 struct krping_cb *cb;
2318 mutex_lock(&krping_mutex);
2319 list_for_each_entry(cb, &krping_cbs, list)
2320 (*f)(cb->pd ? &cb->stats : NULL, arg);
2321 mutex_unlock(&krping_mutex);
2324 void krping_init(void)
2327 mutex_init(&krping_mutex);