2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2006-2009 Open Grid Computing, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include <linux/module.h>
38 #include <linux/moduleparam.h>
39 #include <linux/slab.h>
40 #include <linux/err.h>
41 #include <linux/string.h>
42 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/pci.h>
46 #include <linux/sched.h>
48 #include <asm/atomic.h>
50 #include <rdma/ib_verbs.h>
51 #include <rdma/rdma_cm.h>
/* Module-wide debug knob (defined elsewhere, likely a sysctl/tunable). */
56 extern int krping_debug;
/*
 * Logging helpers.  NOTE(review): DEBUG_LOG expands to an unbraced "if",
 * so it must not be used as the body of an outer if/else without braces.
 * The cb argument is unused by both macros; it is kept so call sites are
 * uniform.
 */
57 #define DEBUG_LOG(cb, x...) if (krping_debug) log(LOG_INFO, x)
58 #define PRINTF(cb, x...) log(LOG_INFO, x)
/* FreeBSD module metadata; depends on the linuxkpi compat layer. */
61 MODULE_AUTHOR("Steve Wise");
62 MODULE_DESCRIPTION("RDMA ping client/server");
63 MODULE_LICENSE("Dual BSD/GPL");
64 MODULE_VERSION(krping, 1);
65 MODULE_DEPEND(krping, linuxkpi, 1, 1, 1);
/*
 * Cycle counter read via the x86 RDTSC instruction (low in EAX, high in
 * EDX).  NOTE(review): lines are missing from this extract -- the function
 * name, locals ("low"/"high") and braces are not visible here.  RDTSC is
 * not serializing and may vary across cores; acceptable for the coarse
 * cycle accounting done by the *_test() routines below.
 */
67 static __inline uint64_t
71 __asm __volatile("rdtsc" : "=a" (low), "=d" (high));
72 return (low | ((u_int64_t)high << 32));
/* Match the Linux krping type name for cycle counts. */
75 typedef uint64_t cycles_t;
/*
 * Command-string option table: long option name, argument kind, and the
 * single-character key used by the option parser.  NOTE(review): the
 * NULL terminator entry and closing brace are missing from this extract.
 */
84 static const struct krping_option krping_opts[] = {
85 {"count", OPT_INT, 'C'},
86 {"size", OPT_INT, 'S'},
87 {"addr", OPT_STRING, 'a'},
88 {"port", OPT_INT, 'p'},
89 {"verbose", OPT_NOPARAM, 'v'},
90 {"validate", OPT_NOPARAM, 'V'},
91 {"server", OPT_NOPARAM, 's'},
92 {"client", OPT_NOPARAM, 'c'},
93 {"mem_mode", OPT_STRING, 'm'},
94 {"server_inv", OPT_NOPARAM, 'I'},
95 {"wlat", OPT_NOPARAM, 'l'},
96 {"rlat", OPT_NOPARAM, 'L'},
97 {"bw", OPT_NOPARAM, 'B'},
98 {"duplex", OPT_NOPARAM, 'd'},
99 {"txdepth", OPT_INT, 'T'},
100 {"poll", OPT_NOPARAM, 'P'},
101 {"local_dma_lkey", OPT_NOPARAM, 'Z'},
102 {"read_inv", OPT_NOPARAM, 'R'},
103 {"fr", OPT_INT, 'f'},
/*
 * 64-bit endianness helpers.  ntohll deliberately reuses cpu_to_be64:
 * a 64-bit byte swap is its own inverse, so the conversion is correct in
 * both directions.  be64_to_cpu would be clearer but is equivalent.
 */
107 #define htonll(x) cpu_to_be64((x))
108 #define ntohll(x) cpu_to_be64((x))
/* Serializes access to the global cb list below. */
110 static struct mutex krping_mutex;
113 * List of running krping threads.
115 static LIST_HEAD(krping_cbs);
118 * krping "ping/pong" loop:
119 * client sends source rkey/addr/len
120 * server receives source rkey/add/len
121 * server rdma reads "ping" data from source
122 * server sends "go ahead" on rdma read completion
123 * client sends sink rkey/addr/len
124 * server receives sink rkey/addr/len
125 * server rdma writes "pong" data to sink
126 * server sends "go ahead" on rdma write completion
131 * These states are used to signal events between the completion handler
132 * and the main client or server thread.
134 * Once CONNECTED, they cycle through RDMA_READ_ADV, RDMA_WRITE_ADV,
135 * and RDMA_WRITE_COMPLETE for each ping.
/*
 * Wire format exchanged over the SEND channel: the advertised rkey, buffer
 * address and length of one side's RDMA buffer (fields not visible in this
 * extract; see the htonll/ntohl conversions in server_recv/client_recv).
 */
150 struct krping_rdma_info {
/* Default max buffer size for IO (bytes) and send-queue depth. */
159 #define RPING_BUFSIZE 128*1024
160 #define RPING_SQ_DEPTH 64
/*
 * Per-connection control block.  One instance per krping thread; holds
 * the CM ids, verbs objects, registered buffers, test options and the
 * state machine used to hand events from the completion/CM handlers to
 * the main thread via the "sem" wait queue.
 */
167 int server; /* 0 iff client */
173 struct ib_mr *dma_mr;
175 struct ib_fast_reg_page_list *page_list;
177 struct ib_send_wr fastreg_wr;
178 struct ib_send_wr invalidate_wr;
179 struct ib_mr *fastreg_mr;
180 int server_invalidate;
185 struct ib_mw_bind bind_attr;
187 struct ib_recv_wr rq_wr; /* recv work request record */
188 struct ib_sge recv_sgl; /* recv single SGE */
189 struct krping_rdma_info recv_buf;/* malloc'd buffer */
191 DECLARE_PCI_UNMAP_ADDR(recv_mapping)
192 struct ib_mr *recv_mr;
194 struct ib_send_wr sq_wr; /* send work requrest record */
195 struct ib_sge send_sgl;
196 struct krping_rdma_info send_buf;/* single send buf */
198 DECLARE_PCI_UNMAP_ADDR(send_mapping)
199 struct ib_mr *send_mr;
201 struct ib_send_wr rdma_sq_wr; /* rdma work request record */
202 struct ib_sge rdma_sgl; /* rdma single SGE */
203 char *rdma_buf; /* used as rdma sink */
205 DECLARE_PCI_UNMAP_ADDR(rdma_mapping)
206 struct ib_mr *rdma_mr;
208 uint32_t remote_rkey; /* remote guys RKEY */
209 uint64_t remote_addr; /* remote guys TO */
210 uint32_t remote_len; /* remote guys LEN */
212 char *start_buf; /* rdma read src */
214 DECLARE_PCI_UNMAP_ADDR(start_mapping)
215 struct ib_mr *start_mr;
217 enum test_state state; /* used for cond/signalling */
218 wait_queue_head_t sem;
219 struct krping_stats stats;
221 uint16_t port; /* dst port in NBO */
222 struct in_addr addr; /* dst addr in NBO */
223 char *addr_str; /* dst addr string */
224 int verbose; /* verbose logging */
225 int count; /* ping count */
226 int size; /* ping data size */
227 int validate; /* validate ping data */
228 int wlat; /* run wlat test */
229 int rlat; /* run rlat test */
230 int bw; /* run bw test */
231 int duplex; /* run bw full duplex test */
232 int poll; /* poll or block for rlat test */
233 int txdepth; /* SQ depth */
234 int local_dma_lkey; /* use 0 for lkey */
235 int frtest; /* fastreg test */
239 struct rdma_cm_id *cm_id; /* connection on client side,*/
240 /* listener on server side. */
241 struct rdma_cm_id *child_cm_id; /* connection on server side */
242 struct list_head list;
/*
 * RDMA CM event callback.  Runs in CM callback context: records the new
 * connection state in the cb and wakes the main thread sleeping on
 * cb->sem.  NOTE(review): this extract is missing lines ("ret" decl,
 * error-path state assignments, break statements, final return).
 */
245 static int krping_cma_event_handler(struct rdma_cm_id *cma_id,
246 struct rdma_cm_event *event)
249 struct krping_cb *cb = cma_id->context;
251 DEBUG_LOG(cb, "cma_event type %d cma_id %p (%s)\n", event->event,
252 cma_id, (cma_id == cb->cm_id) ? "parent" : "child");
254 switch (event->event) {
255 case RDMA_CM_EVENT_ADDR_RESOLVED:
256 cb->state = ADDR_RESOLVED;
/* Client side: address resolved, now kick off route resolution. */
257 ret = rdma_resolve_route(cma_id, 2000);
259 PRINTF(cb, "rdma_resolve_route error %d\n", ret);
260 wake_up_interruptible(&cb->sem);
264 case RDMA_CM_EVENT_ROUTE_RESOLVED:
265 cb->state = ROUTE_RESOLVED;
/*
 * NOTE(review): assigning child_cm_id on ROUTE_RESOLVED (a client-side
 * event) looks out of place -- verify against the upstream krping source;
 * missing lines may have shuffled this statement from another case.
 */
266 cb->child_cm_id = cma_id;
267 wake_up_interruptible(&cb->sem);
270 case RDMA_CM_EVENT_CONNECT_REQUEST:
/* Server side: only accept a request when we are idle/listening. */
271 if (cb->state == IDLE) {
272 cb->state = CONNECT_REQUEST;
273 cb->child_cm_id = cma_id;
275 PRINTF(cb, "Received connection request in wrong state"
276 " (%d)\n", cb->state);
278 DEBUG_LOG(cb, "child cma %p\n", cb->child_cm_id);
279 wake_up_interruptible(&cb->sem);
282 case RDMA_CM_EVENT_ESTABLISHED:
283 DEBUG_LOG(cb, "ESTABLISHED\n");
285 cb->state = CONNECTED;
287 wake_up_interruptible(&cb->sem);
/* All CM failure flavors funnel into one error report + wakeup. */
290 case RDMA_CM_EVENT_ADDR_ERROR:
291 case RDMA_CM_EVENT_ROUTE_ERROR:
292 case RDMA_CM_EVENT_CONNECT_ERROR:
293 case RDMA_CM_EVENT_UNREACHABLE:
294 case RDMA_CM_EVENT_REJECTED:
295 PRINTF(cb, "cma event %d, error %d\n", event->event,
298 wake_up_interruptible(&cb->sem);
301 case RDMA_CM_EVENT_DISCONNECTED:
302 PRINTF(cb, "DISCONNECT EVENT...\n");
304 wake_up_interruptible(&cb->sem);
307 case RDMA_CM_EVENT_DEVICE_REMOVAL:
308 PRINTF(cb, "cma detected device removal!!!!\n");
312 PRINTF(cb, "oof bad type!\n");
313 wake_up_interruptible(&cb->sem);
/*
 * Handle a receive completion on the server: unmarshal the client's
 * advertised rkey/addr/len (network byte order on the wire) and advance
 * the state machine.  The first/odd advertisement is a read source
 * (RDMA_READ_ADV), the following one a write sink (RDMA_WRITE_ADV).
 * NOTE(review): the bogus-size early return and final return are among
 * the lines missing from this extract.
 */
319 static int server_recv(struct krping_cb *cb, struct ib_wc *wc)
321 if (wc->byte_len != sizeof(cb->recv_buf)) {
322 PRINTF(cb, "Received bogus data, size %d\n",
/* Convert the advertised triple from network byte order. */
327 cb->remote_rkey = ntohl(cb->recv_buf.rkey);
328 cb->remote_addr = ntohll(cb->recv_buf.buf);
329 cb->remote_len = ntohl(cb->recv_buf.size);
330 DEBUG_LOG(cb, "Received rkey %x addr %llx len %d from peer\n",
331 cb->remote_rkey, (unsigned long long)cb->remote_addr,
/* Fresh connection or completed pong -> this adv is the read source. */
334 if (cb->state <= CONNECTED || cb->state == RDMA_WRITE_COMPLETE)
335 cb->state = RDMA_READ_ADV;
337 cb->state = RDMA_WRITE_ADV;
/*
 * Handle a receive completion on the client: the server's "go ahead"
 * messages carry no payload beyond the fixed-size header, so this only
 * validates the length and steps RDMA_READ_ADV -> RDMA_WRITE_ADV ->
 * RDMA_WRITE_COMPLETE.  NOTE(review): early-return and final return
 * lines are missing from this extract.
 */
342 static int client_recv(struct krping_cb *cb, struct ib_wc *wc)
344 if (wc->byte_len != sizeof(cb->recv_buf)) {
345 PRINTF(cb, "Received bogus data, size %d\n",
350 if (cb->state == RDMA_READ_ADV)
351 cb->state = RDMA_WRITE_ADV;
353 cb->state = RDMA_WRITE_COMPLETE;
/*
 * Completion-queue callback: drains the CQ, updates per-op statistics,
 * advances the state machine and wakes the main thread.  Re-arms CQ
 * notification only for the ping/pong mode -- the wlat/rlat/bw/frtest
 * paths poll the CQ themselves.  NOTE(review): this extract is missing
 * lines (wc/ret declarations, "goto error" paths, break statements,
 * switch/loop closers).
 */
358 static void krping_cq_event_handler(struct ib_cq *cq, void *ctx)
360 struct krping_cb *cb = ctx;
362 struct ib_recv_wr *bad_wr;
365 BUG_ON(cb->cq != cq);
366 if (cb->state == ERROR) {
367 PRINTF(cb, "cq completion in ERROR state\n");
/* Only the default ping/pong test runs event-driven; re-arm here. */
370 if (!cb->wlat && !cb->rlat && !cb->bw && !cb->frtest)
371 ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
372 while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 1) {
/* Flush errors are expected at teardown; anything else is fatal. */
374 if (wc.status == IB_WC_WR_FLUSH_ERR) {
375 DEBUG_LOG(cb, "cq flushed\n");
378 PRINTF(cb, "cq completion failed with "
379 "wr_id %jx status %d opcode %d vender_err %x\n",
380 (uintmax_t)wc.wr_id, wc.status, wc.opcode, wc.vendor_err);
387 DEBUG_LOG(cb, "send completion\n");
388 cb->stats.send_bytes += cb->send_sgl.length;
389 cb->stats.send_msgs++;
392 case IB_WC_RDMA_WRITE:
393 DEBUG_LOG(cb, "rdma write completion\n");
394 cb->stats.write_bytes += cb->rdma_sq_wr.sg_list->length;
395 cb->stats.write_msgs++;
396 cb->state = RDMA_WRITE_COMPLETE;
397 wake_up_interruptible(&cb->sem);
400 case IB_WC_RDMA_READ:
401 DEBUG_LOG(cb, "rdma read completion\n");
402 cb->stats.read_bytes += cb->rdma_sq_wr.sg_list->length;
403 cb->stats.read_msgs++;
404 cb->state = RDMA_READ_COMPLETE;
405 wake_up_interruptible(&cb->sem);
409 DEBUG_LOG(cb, "recv completion\n");
410 cb->stats.recv_bytes += sizeof(cb->recv_buf);
411 cb->stats.recv_msgs++;
/* Latency/bw tests always use the server-side recv unmarshaling. */
412 if (cb->wlat || cb->rlat || cb->bw || cb->frtest)
413 ret = server_recv(cb, &wc);
415 ret = cb->server ? server_recv(cb, &wc) :
416 client_recv(cb, &wc);
418 PRINTF(cb, "recv wc error: %d\n", ret);
/* Repost the single recv WR so the next message can land. */
422 ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
424 PRINTF(cb, "post recv error: %d\n",
428 wake_up_interruptible(&cb->sem);
433 "%s:%d Unexpected opcode %d, Shutting down\n",
434 __func__, __LINE__, wc.opcode);
439 PRINTF(cb, "poll error %d\n", ret);
445 wake_up_interruptible(&cb->sem);
/*
 * Accept the pending child connection with minimal RDMA-read credits
 * (1/1), then -- for the plain ping/pong mode only -- block until the
 * CM callback reports CONNECTED (or ERROR).  The polling tests skip the
 * wait.  NOTE(review): returns and closing braces are missing from this
 * extract.
 */
448 static int krping_accept(struct krping_cb *cb)
450 struct rdma_conn_param conn_param;
453 DEBUG_LOG(cb, "accepting client connection request\n");
455 memset(&conn_param, 0, sizeof conn_param);
456 conn_param.responder_resources = 1;
457 conn_param.initiator_depth = 1;
459 ret = rdma_accept(cb->child_cm_id, &conn_param);
461 PRINTF(cb, "rdma_accept error: %d\n", ret);
465 if (!cb->wlat && !cb->rlat && !cb->bw && !cb->frtest) {
466 wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
467 if (cb->state == ERROR) {
468 PRINTF(cb, "wait for CONNECTED state %d\n",
/*
 * Pre-build the reusable work requests: recv WR, send WR, the RDMA WR
 * (server / test modes), and the mode-specific rebind machinery
 * (FASTREG invalidate+fastreg WR chain, or the MW bind attributes).
 * The lkey source depends on memory mode: device-global DMA lkey,
 * DMA MR lkey, or a per-buffer registered MR lkey.  NOTE(review):
 * several lines (wr_id setup, mode switch, #ifdef branches for the old
 * vs. new ib_mw_bind layout) are missing from this extract.
 */
476 static void krping_setup_wr(struct krping_cb *cb)
478 cb->recv_sgl.addr = cb->recv_dma_addr;
479 cb->recv_sgl.length = sizeof cb->recv_buf;
480 if (cb->local_dma_lkey)
481 cb->recv_sgl.lkey = cb->qp->device->local_dma_lkey;
482 else if (cb->mem == DMA)
483 cb->recv_sgl.lkey = cb->dma_mr->lkey;
485 cb->recv_sgl.lkey = cb->recv_mr->lkey;
486 cb->rq_wr.sg_list = &cb->recv_sgl;
487 cb->rq_wr.num_sge = 1;
489 cb->send_sgl.addr = cb->send_dma_addr;
490 cb->send_sgl.length = sizeof cb->send_buf;
491 if (cb->local_dma_lkey)
492 cb->send_sgl.lkey = cb->qp->device->local_dma_lkey;
493 else if (cb->mem == DMA)
494 cb->send_sgl.lkey = cb->dma_mr->lkey;
496 cb->send_sgl.lkey = cb->send_mr->lkey;
498 cb->sq_wr.opcode = IB_WR_SEND;
499 cb->sq_wr.send_flags = IB_SEND_SIGNALED;
500 cb->sq_wr.sg_list = &cb->send_sgl;
501 cb->sq_wr.num_sge = 1;
/* The RDMA WR is used by the server and by all non-ping test modes. */
503 if (cb->server || cb->wlat || cb->rlat || cb->bw || cb->frtest) {
504 cb->rdma_sgl.addr = cb->rdma_dma_addr;
506 cb->rdma_sgl.lkey = cb->rdma_mr->lkey;
507 cb->rdma_sq_wr.send_flags = IB_SEND_SIGNALED;
508 cb->rdma_sq_wr.sg_list = &cb->rdma_sgl;
509 cb->rdma_sq_wr.num_sge = 1;
516 * A chain of 2 WRs, INVALDATE_MR + FAST_REG_MR.
517 * both unsignaled. The client uses them to reregister
518 * the rdma buffers with a new key each iteration.
520 cb->fastreg_wr.opcode = IB_WR_FAST_REG_MR;
521 cb->fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
522 cb->fastreg_wr.wr.fast_reg.length = cb->size;
523 cb->fastreg_wr.wr.fast_reg.page_list = cb->page_list;
524 cb->fastreg_wr.wr.fast_reg.page_list_len = cb->page_list_len;
526 cb->invalidate_wr.next = &cb->fastreg_wr;
527 cb->invalidate_wr.opcode = IB_WR_LOCAL_INV;
/* MW mode: unsignaled bind with a fixed debug wr_id. */
530 cb->bind_attr.wr_id = 0xabbaabba;
531 cb->bind_attr.send_flags = 0; /* unsignaled */
/* Both ib_mw_bind layouts set the same length (compat #ifdef, lines missing). */
533 cb->bind_attr.bind_info.length = cb->size;
535 cb->bind_attr.length = cb->size;
/*
 * Allocate, DMA-map and register all buffers used by the tests:
 *   - recv_buf/send_buf (always mapped; MRs only when not using the
 *     device DMA lkey),
 *   - rdma_buf (the RDMA sink),
 *   - mode-specific objects (fastreg MR + page list, or memory window),
 *   - start_buf (the RDMA read source; client side and test modes).
 * On failure, jumps to a cleanup ladder that tears down whatever was
 * created.  NOTE(review): this extract is missing lines (labels, gotos,
 * some access-flag arguments, closing braces); comments below describe
 * only the visible statements.
 */
543 static int krping_setup_buffers(struct krping_cb *cb)
546 struct ib_phys_buf buf;
549 DEBUG_LOG(cb, "krping_setup_buffers called on cb %p\n", cb);
/* Map the fixed-size control messages for DMA in both directions. */
551 cb->recv_dma_addr = ib_dma_map_single(cb->pd->device,
553 sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
554 pci_unmap_addr_set(cb, recv_mapping, cb->recv_dma_addr);
555 cb->send_dma_addr = ib_dma_map_single(cb->pd->device,
556 &cb->send_buf, sizeof(cb->send_buf),
558 pci_unmap_addr_set(cb, send_mapping, cb->send_dma_addr);
/* DMA mode: one MR covering all of memory, local+remote access. */
560 if (cb->mem == DMA) {
561 cb->dma_mr = ib_get_dma_mr(cb->pd, IB_ACCESS_LOCAL_WRITE|
562 IB_ACCESS_REMOTE_READ|
563 IB_ACCESS_REMOTE_WRITE);
564 if (IS_ERR(cb->dma_mr)) {
565 DEBUG_LOG(cb, "reg_dmamr failed\n");
566 ret = PTR_ERR(cb->dma_mr);
/* Otherwise register phys MRs for the two control buffers. */
570 if (!cb->local_dma_lkey) {
571 buf.addr = cb->recv_dma_addr;
572 buf.size = sizeof cb->recv_buf;
573 DEBUG_LOG(cb, "recv buf dma_addr %jx size %d\n",
574 (uintmax_t)buf.addr, (int)buf.size);
575 iovbase = cb->recv_dma_addr;
576 cb->recv_mr = ib_reg_phys_mr(cb->pd, &buf, 1,
577 IB_ACCESS_LOCAL_WRITE,
580 if (IS_ERR(cb->recv_mr)) {
581 DEBUG_LOG(cb, "recv_buf reg_mr failed\n");
582 ret = PTR_ERR(cb->recv_mr);
586 buf.addr = cb->send_dma_addr;
587 buf.size = sizeof cb->send_buf;
588 DEBUG_LOG(cb, "send buf dma_addr %jx size %d\n",
589 (uintmax_t)buf.addr, (int)buf.size);
590 iovbase = cb->send_dma_addr;
591 cb->send_mr = ib_reg_phys_mr(cb->pd, &buf, 1,
594 if (IS_ERR(cb->send_mr)) {
595 DEBUG_LOG(cb, "send_buf reg_mr failed\n");
596 ret = PTR_ERR(cb->send_mr);
/* The RDMA sink buffer, sized by the -S option. */
602 cb->rdma_buf = kmalloc(cb->size, GFP_KERNEL);
604 DEBUG_LOG(cb, "rdma_buf malloc failed\n");
609 cb->rdma_dma_addr = ib_dma_map_single(cb->pd->device,
610 cb->rdma_buf, cb->size,
612 pci_unmap_addr_set(cb, rdma_mapping, cb->rdma_dma_addr);
613 if (cb->mem != DMA) {
/* FASTREG mode: page list sized to cover cb->size pages. */
616 cb->page_list_len = (((cb->size - 1) & PAGE_MASK) +
617 PAGE_SIZE) >> PAGE_SHIFT;
618 cb->page_list = ib_alloc_fast_reg_page_list(
621 if (IS_ERR(cb->page_list)) {
622 DEBUG_LOG(cb, "recv_buf reg_mr failed\n");
623 ret = PTR_ERR(cb->page_list);
626 cb->fastreg_mr = ib_alloc_fast_reg_mr(cb->pd,
627 cb->page_list->max_page_list_len);
628 if (IS_ERR(cb->fastreg_mr)) {
629 DEBUG_LOG(cb, "recv_buf reg_mr failed\n");
630 ret = PTR_ERR(cb->fastreg_mr);
633 DEBUG_LOG(cb, "fastreg rkey 0x%x page_list %p"
634 " page_list_len %u\n", cb->fastreg_mr->rkey,
635 cb->page_list, cb->page_list_len);
/* MW mode: type-1 memory window, bound later per iteration. */
638 cb->mw = ib_alloc_mw(cb->pd,IB_MW_TYPE_1);
639 if (IS_ERR(cb->mw)) {
640 DEBUG_LOG(cb, "recv_buf alloc_mw failed\n");
641 ret = PTR_ERR(cb->mw);
644 DEBUG_LOG(cb, "mw rkey 0x%x\n", cb->mw->rkey);
647 buf.addr = cb->rdma_dma_addr;
649 iovbase = cb->rdma_dma_addr;
650 cb->rdma_mr = ib_reg_phys_mr(cb->pd, &buf, 1,
651 IB_ACCESS_LOCAL_WRITE|
652 IB_ACCESS_REMOTE_READ|
653 IB_ACCESS_REMOTE_WRITE,
655 if (IS_ERR(cb->rdma_mr)) {
656 DEBUG_LOG(cb, "rdma_buf reg_mr failed\n");
657 ret = PTR_ERR(cb->rdma_mr);
660 DEBUG_LOG(cb, "rdma buf dma_addr %jx size %d mr rkey 0x%x\n",
661 (uintmax_t)buf.addr, (int)buf.size, cb->rdma_mr->rkey);
/* Read source buffer: client side, plus all latency/bw/fr tests. */
670 if (!cb->server || cb->wlat || cb->rlat || cb->bw || cb->frtest) {
672 cb->start_buf = kmalloc(cb->size, GFP_KERNEL);
673 if (!cb->start_buf) {
674 DEBUG_LOG(cb, "start_buf malloc failed\n");
679 cb->start_dma_addr = ib_dma_map_single(cb->pd->device,
680 cb->start_buf, cb->size,
682 pci_unmap_addr_set(cb, start_mapping, cb->start_dma_addr);
684 if (cb->mem == MR || cb->mem == MW) {
685 unsigned flags = IB_ACCESS_REMOTE_READ;
/* Test modes also write into start_buf remotely/locally. */
687 if (cb->wlat || cb->rlat || cb->bw || cb->frtest) {
688 flags |= IB_ACCESS_LOCAL_WRITE |
689 IB_ACCESS_REMOTE_WRITE;
692 buf.addr = cb->start_dma_addr;
694 DEBUG_LOG(cb, "start buf dma_addr %jx size %d\n",
695 (uintmax_t)buf.addr, (int)buf.size);
696 iovbase = cb->start_dma_addr;
697 cb->start_mr = ib_reg_phys_mr(cb->pd, &buf, 1,
701 if (IS_ERR(cb->start_mr)) {
702 DEBUG_LOG(cb, "start_buf reg_mr failed\n");
703 ret = PTR_ERR(cb->start_mr);
710 DEBUG_LOG(cb, "allocated & registered buffers...\n");
/* Error-unwind ladder: free everything created above, in reverse. */
713 if (cb->fastreg_mr && !IS_ERR(cb->fastreg_mr))
714 ib_dereg_mr(cb->fastreg_mr);
715 if (cb->mw && !IS_ERR(cb->mw))
716 ib_dealloc_mw(cb->mw);
717 if (cb->rdma_mr && !IS_ERR(cb->rdma_mr))
718 ib_dereg_mr(cb->rdma_mr);
719 if (cb->page_list && !IS_ERR(cb->page_list))
720 ib_free_fast_reg_page_list(cb->page_list);
721 if (cb->dma_mr && !IS_ERR(cb->dma_mr))
722 ib_dereg_mr(cb->dma_mr);
723 if (cb->recv_mr && !IS_ERR(cb->recv_mr))
724 ib_dereg_mr(cb->recv_mr);
725 if (cb->send_mr && !IS_ERR(cb->send_mr))
726 ib_dereg_mr(cb->send_mr);
730 kfree(cb->start_buf);
/*
 * Teardown counterpart of krping_setup_buffers(): deregister every MR /
 * MW that was created and unmap the DMA mappings for the control and
 * data buffers, then free start_buf.  NOTE(review): the guarding "if
 * (cb->xxx)" lines for most dereg calls and the rdma_buf kfree are
 * missing from this extract.
 */
734 static void krping_free_buffers(struct krping_cb *cb)
736 DEBUG_LOG(cb, "krping_free_buffers called on cb %p\n", cb);
739 ib_dereg_mr(cb->dma_mr);
741 ib_dereg_mr(cb->send_mr);
743 ib_dereg_mr(cb->recv_mr);
745 ib_dereg_mr(cb->rdma_mr);
747 ib_dereg_mr(cb->start_mr);
749 ib_dereg_mr(cb->fastreg_mr);
751 ib_dealloc_mw(cb->mw);
/* Unmap the three always-mapped regions (recv, send, rdma sink). */
753 dma_unmap_single(cb->pd->device->dma_device,
754 pci_unmap_addr(cb, recv_mapping),
755 sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
756 dma_unmap_single(cb->pd->device->dma_device,
757 pci_unmap_addr(cb, send_mapping),
758 sizeof(cb->send_buf), DMA_BIDIRECTIONAL);
759 dma_unmap_single(cb->pd->device->dma_device,
760 pci_unmap_addr(cb, rdma_mapping),
761 cb->size, DMA_BIDIRECTIONAL);
/* start_buf exists only on the client / test-mode paths. */
764 dma_unmap_single(cb->pd->device->dma_device,
765 pci_unmap_addr(cb, start_mapping),
766 cb->size, DMA_BIDIRECTIONAL);
767 kfree(cb->start_buf);
/*
 * Create the RC queue pair on the appropriate cm_id: the child id on the
 * server (post-accept connection), the parent id on the client.  Both
 * send and recv completions share cb->cq.  SQ depth comes from the
 * txdepth option; RQ depth is fixed at 2 (the single recv WR plus one
 * in flight).  NOTE(review): the server/client "if" line and the error
 * checks after rdma_create_qp are missing from this extract.
 */
771 static int krping_create_qp(struct krping_cb *cb)
773 struct ib_qp_init_attr init_attr;
776 memset(&init_attr, 0, sizeof(init_attr));
777 init_attr.cap.max_send_wr = cb->txdepth;
778 init_attr.cap.max_recv_wr = 2;
779 init_attr.cap.max_recv_sge = 1;
780 init_attr.cap.max_send_sge = 1;
781 init_attr.qp_type = IB_QPT_RC;
782 init_attr.send_cq = cb->cq;
783 init_attr.recv_cq = cb->cq;
784 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
787 ret = rdma_create_qp(cb->child_cm_id, cb->pd, &init_attr);
789 cb->qp = cb->child_cm_id->qp;
791 ret = rdma_create_qp(cb->cm_id, cb->pd, &init_attr);
793 cb->qp = cb->cm_id->qp;
/*
 * Destroy the verbs objects in reverse creation order: QP, then CQ,
 * then PD.  Callers must ensure no WRs are outstanding first.
 */
799 static void krping_free_qp(struct krping_cb *cb)
801 ib_destroy_qp(cb->qp);
802 ib_destroy_cq(cb->cq);
803 ib_dealloc_pd(cb->pd);
/*
 * Build the verbs stack for one connection: PD, CQ (depth 2*txdepth so
 * send and recv completions both fit), then the QP.  CQ notification is
 * armed only for the event-driven ping/pong mode; the polling tests arm
 * it themselves as needed.  Errors unwind via the cq/pd cleanup at the
 * bottom.  NOTE(review): error-check lines after ib_req_notify_cq and
 * the goto targets are partly missing; the "ib_create_cq failed" text
 * at line 830 follows an ib_req_notify_cq call -- the message looks
 * copy-pasted, verify against upstream.
 */
806 static int krping_setup_qp(struct krping_cb *cb, struct rdma_cm_id *cm_id)
809 cb->pd = ib_alloc_pd(cm_id->device);
810 if (IS_ERR(cb->pd)) {
811 PRINTF(cb, "ib_alloc_pd failed\n");
812 return PTR_ERR(cb->pd);
814 DEBUG_LOG(cb, "created pd %p\n", cb->pd);
816 strlcpy(cb->stats.name, cb->pd->device->name, sizeof(cb->stats.name));
818 cb->cq = ib_create_cq(cm_id->device, krping_cq_event_handler, NULL,
819 cb, cb->txdepth * 2, 0);
820 if (IS_ERR(cb->cq)) {
821 PRINTF(cb, "ib_create_cq failed\n");
822 ret = PTR_ERR(cb->cq);
825 DEBUG_LOG(cb, "created cq %p\n", cb->cq);
827 if (!cb->wlat && !cb->rlat && !cb->bw && !cb->frtest) {
828 ret = ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
830 PRINTF(cb, "ib_create_cq failed\n");
835 ret = krping_create_qp(cb);
837 PRINTF(cb, "krping_create_qp failed: %d\n", ret);
840 DEBUG_LOG(cb, "created qp %p\n", cb->qp);
/* Error unwind: destroy whatever of cq/pd was created. */
843 ib_destroy_cq(cb->cq);
845 ib_dealloc_pd(cb->pd);
850 * return the (possibly rebound) rkey for the rdma buffer.
851 * FASTREG mode: invalidate and rebind via fastreg wr.
852 * MW mode: rebind the MW.
853 * other modes: just return the mr rkey.
/*
 * NOTE(review): this extract is missing lines (the mem-mode switch,
 * #ifdef branches for the two ib_mw_bind layouts, loop/brace closers,
 * final return).  Comments below annotate only the visible statements.
 */
855 static u32 krping_rdma_rkey(struct krping_cb *cb, u64 buf, int post_inv)
857 u32 rkey = 0xffffffff;
859 struct ib_send_wr *bad_wr;
/* FASTREG path: invalidate the old rkey, bump the key byte, re-register. */
865 cb->invalidate_wr.ex.invalidate_rkey = cb->fastreg_mr->rkey;
868 * Update the fastreg key.
870 ib_update_fast_reg_key(cb->fastreg_mr, ++cb->key);
871 cb->fastreg_wr.wr.fast_reg.rkey = cb->fastreg_mr->rkey;
874 * Update the fastreg WR with new buf info.
/* Read source gets REMOTE_READ; the sink gets REMOTE_WRITE|LOCAL_WRITE. */
876 if (buf == (u64)cb->start_dma_addr)
877 cb->fastreg_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_READ;
879 cb->fastreg_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
880 cb->fastreg_wr.wr.fast_reg.iova_start = buf;
/* Fill the page list with the page-aligned frames backing buf. */
881 p = (u64)(buf & PAGE_MASK);
882 for (i=0; i < cb->fastreg_wr.wr.fast_reg.page_list_len;
883 i++, p += PAGE_SIZE) {
884 cb->page_list->page_list[i] = p;
885 DEBUG_LOG(cb, "page_list[%d] 0x%jx\n", i, (uintmax_t)p);
888 DEBUG_LOG(cb, "post_inv = %d, fastreg new rkey 0x%x shift %u len %u"
889 " iova_start %jx page_list_len %u\n",
891 cb->fastreg_wr.wr.fast_reg.rkey,
892 cb->fastreg_wr.wr.fast_reg.page_shift,
893 (unsigned)cb->fastreg_wr.wr.fast_reg.length,
894 (uintmax_t)cb->fastreg_wr.wr.fast_reg.iova_start,
895 cb->fastreg_wr.wr.fast_reg.page_list_len);
/* post_inv selects posting the 2-WR chain vs. the fastreg WR alone. */
898 ret = ib_post_send(cb->qp, &cb->invalidate_wr, &bad_wr);
900 ret = ib_post_send(cb->qp, &cb->fastreg_wr, &bad_wr);
902 PRINTF(cb, "post send error %d\n", ret);
905 rkey = cb->fastreg_mr->rkey;
909 * Update the MW with new buf info.
/* MW path: rebind the window onto start_mr (read) or rdma_mr (write). */
911 if (buf == (u64)cb->start_dma_addr) {
913 cb->bind_attr.bind_info.mw_access_flags = IB_ACCESS_REMOTE_READ;
914 cb->bind_attr.bind_info.mr = cb->start_mr;
916 cb->bind_attr.mw_access_flags = IB_ACCESS_REMOTE_READ;
917 cb->bind_attr.mr = cb->start_mr;
921 cb->bind_attr.bind_info.mw_access_flags = IB_ACCESS_REMOTE_WRITE;
922 cb->bind_attr.bind_info.mr = cb->rdma_mr;
924 cb->bind_attr.mw_access_flags = IB_ACCESS_REMOTE_WRITE;
925 cb->bind_attr.mr = cb->rdma_mr;
929 cb->bind_attr.bind_info.addr = buf;
931 cb->bind_attr.addr = buf;
933 DEBUG_LOG(cb, "binding mw rkey 0x%x to buf %jx mr rkey 0x%x\n",
935 cb->mw->rkey, (uintmax_t)buf, cb->bind_attr.bind_info.mr->rkey);
937 cb->mw->rkey, buf, cb->bind_attr.mr->rkey);
939 ret = ib_bind_mw(cb->qp, cb->mw, &cb->bind_attr);
941 PRINTF(cb, "bind mw error %d\n", ret);
/* MR mode: pick the per-buffer MR's rkey; DMA mode: the global MR. */
947 if (buf == (u64)cb->start_dma_addr)
948 rkey = cb->start_mr->rkey;
950 rkey = cb->rdma_mr->rkey;
953 rkey = cb->dma_mr->rkey;
956 PRINTF(cb, "%s:%d case ERROR\n", __func__, __LINE__);
/*
 * Fill send_buf with the advertisement for "buf": rebind/refresh the
 * rkey first (fastreg/MW modes), then marshal rkey/addr/size into
 * network byte order.  Plain server-side sends carry no payload, so
 * nothing is formatted in that case.
 */
963 static void krping_format_send(struct krping_cb *cb, u64 buf)
965 struct krping_rdma_info *info = &cb->send_buf;
969 * Client side will do fastreg or mw bind before
970 * advertising the rdma buffer. Server side
971 * sends have no data.
973 if (!cb->server || cb->wlat || cb->rlat || cb->bw || cb->frtest) {
/* When the server will SEND_WITH_INV, skip posting our own invalidate. */
974 rkey = krping_rdma_rkey(cb, buf, !cb->server_invalidate);
975 info->buf = htonll(buf);
976 info->rkey = htonl(rkey);
977 info->size = htonl(cb->size);
978 DEBUG_LOG(cb, "RDMA addr %llx rkey %x len %d\n",
979 (unsigned long long)buf, rkey, cb->size);
/*
 * Server side of the ping/pong loop (see the protocol comment near the
 * top of the file): wait for the client's source advertisement, RDMA
 * READ the ping data, send "go ahead", wait for the sink advertisement,
 * RDMA WRITE the pong data back, send "go ahead", repeat.
 * NOTE(review): this extract is missing lines (the enclosing while
 * loop, break/return statements, closing braces); comments annotate
 * only the visible statements.
 */
983 static void krping_test_server(struct krping_cb *cb)
985 struct ib_send_wr *bad_wr, inv;
989 /* Wait for client's Start STAG/TO/Len */
990 wait_event_interruptible(cb->sem, cb->state >= RDMA_READ_ADV);
991 if (cb->state != RDMA_READ_ADV) {
992 PRINTF(cb, "wait for RDMA_READ_ADV state %d\n",
997 DEBUG_LOG(cb, "server received sink adv\n");
/* Target the client's advertised source buffer for the READ. */
999 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
1000 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
1001 cb->rdma_sq_wr.sg_list->length = cb->remote_len;
1002 cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, 1);
1004 /* Issue RDMA Read. */
1006 cb->rdma_sq_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
1009 cb->rdma_sq_wr.opcode = IB_WR_RDMA_READ;
1010 if (cb->mem == FASTREG) {
1012 * Immediately follow the read with a
/* Fenced local-invalidate chained after the READ (fastreg mode). */
1015 cb->rdma_sq_wr.next = &inv;
1016 memset(&inv, 0, sizeof inv);
1017 inv.opcode = IB_WR_LOCAL_INV;
1018 inv.ex.invalidate_rkey = cb->fastreg_mr->rkey;
1019 inv.send_flags = IB_SEND_FENCE;
1023 ret = ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr);
1025 PRINTF(cb, "post send error %d\n", ret);
/* inv is on the stack -- unlink it before it goes out of scope. */
1028 cb->rdma_sq_wr.next = NULL;
1030 DEBUG_LOG(cb, "server posted rdma read req \n");
1032 /* Wait for read completion */
1033 wait_event_interruptible(cb->sem,
1034 cb->state >= RDMA_READ_COMPLETE);
1035 if (cb->state != RDMA_READ_COMPLETE) {
1037 "wait for RDMA_READ_COMPLETE state %d\n",
1041 DEBUG_LOG(cb, "server received read complete\n");
1043 /* Display data in recv buf */
/* Truncate long ping payloads for the log. */
1045 if (strlen(cb->rdma_buf) > 128) {
1048 strlcpy(msgbuf, cb->rdma_buf, sizeof(msgbuf));
1049 PRINTF(cb, "server ping data stripped: %s\n",
1052 PRINTF(cb, "server ping data: %s\n",
1056 /* Tell client to continue */
/* Optionally invalidate the client's rkey as part of the SEND. */
1057 if (cb->server && cb->server_invalidate) {
1058 cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
1059 cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
1060 DEBUG_LOG(cb, "send-w-inv rkey 0x%x\n", cb->remote_rkey);
1062 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1064 PRINTF(cb, "post send error %d\n", ret);
1067 DEBUG_LOG(cb, "server posted go ahead\n");
1069 /* Wait for client's RDMA STAG/TO/Len */
1070 wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
1071 if (cb->state != RDMA_WRITE_ADV) {
1073 "wait for RDMA_WRITE_ADV state %d\n",
1077 DEBUG_LOG(cb, "server received sink adv\n");
1079 /* RDMA Write echo data */
/* Echo only the string actually received (strlen + NUL). */
1080 cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
1081 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
1082 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
1083 cb->rdma_sq_wr.sg_list->length = strlen(cb->rdma_buf) + 1;
1084 if (cb->local_dma_lkey)
1085 cb->rdma_sgl.lkey = cb->qp->device->local_dma_lkey;
1087 cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, 0);
1089 DEBUG_LOG(cb, "rdma write from lkey %x laddr %llx len %d\n",
1090 cb->rdma_sq_wr.sg_list->lkey,
1091 (unsigned long long)cb->rdma_sq_wr.sg_list->addr,
1092 cb->rdma_sq_wr.sg_list->length);
1094 ret = ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr);
1096 PRINTF(cb, "post send error %d\n", ret);
1100 /* Wait for completion */
1101 ret = wait_event_interruptible(cb->sem, cb->state >=
1102 RDMA_WRITE_COMPLETE);
1103 if (cb->state != RDMA_WRITE_COMPLETE) {
1105 "wait for RDMA_WRITE_COMPLETE state %d\n",
1109 DEBUG_LOG(cb, "server rdma write complete \n");
/* Reset for the next ping iteration. */
1111 cb->state = CONNECTED;
1113 /* Tell client to begin again */
1114 if (cb->server && cb->server_invalidate) {
1115 cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
1116 cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
1117 DEBUG_LOG(cb, "send-w-inv rkey 0x%x\n", cb->remote_rkey);
1119 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1121 PRINTF(cb, "post send error %d\n", ret);
1124 DEBUG_LOG(cb, "server posted go ahead\n");
/*
 * Read-latency test (client side): issue cb->count RDMA READs of
 * cb->size bytes back-to-back, one at a time, and report the total
 * wall-clock delta.  cb->poll selects busy-polling the CQ versus
 * sleeping on cb->sem until the CQ handler advances the state.
 * NOTE(review): lines are missing from this extract (scnt declaration
 * and increments, poll/block branch structure, loop closers).
 */
1128 static void rlat_test(struct krping_cb *cb)
1131 int iters = cb->count;
1132 struct timeval start_tv, stop_tv;
1135 struct ib_send_wr *bad_wr;
/* One reusable READ WR aimed at the server's advertised buffer. */
1139 cb->rdma_sq_wr.opcode = IB_WR_RDMA_READ;
1140 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
1141 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
1142 cb->rdma_sq_wr.sg_list->length = cb->size;
1144 microtime(&start_tv);
1146 cb->state = RDMA_READ_ADV;
1147 ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
1149 while (scnt < iters) {
1151 cb->state = RDMA_READ_ADV;
1152 ret = ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr);
1155 "Couldn't post send: ret=%d scnt %d\n",
/* Blocking variant: sleep until the CQ callback flips the state. */
1162 wait_event_interruptible(cb->sem,
1163 cb->state != RDMA_READ_ADV);
1164 if (cb->state == RDMA_READ_COMPLETE) {
1166 ib_req_notify_cq(cb->cq,
/* Polling variant: spin on ib_poll_cq, bailing on ERROR. */
1172 ne = ib_poll_cq(cb->cq, 1, &wc);
1173 if (cb->state == ERROR) {
1175 "state == ERROR...bailing scnt %d\n",
1182 PRINTF(cb, "poll CQ failed %d\n", ne);
1185 if (cb->poll && wc.status != IB_WC_SUCCESS) {
1186 PRINTF(cb, "Completion wth error at %s:\n",
1187 cb->server ? "server" : "client");
1188 PRINTF(cb, "Failed status %d: wr_id %d\n",
1189 wc.status, (int) wc.wr_id);
1194 microtime(&stop_tv);
/* Normalize the timeval subtraction before printing. */
1196 if (stop_tv.tv_usec < start_tv.tv_usec) {
1197 stop_tv.tv_usec += 1000000;
1198 stop_tv.tv_sec -= 1;
1201 PRINTF(cb, "delta sec %lu delta usec %lu iter %d size %d\n",
1202 (unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
1203 (unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
/*
 * Write-latency test: each side RDMA WRITEs a one-byte sequence value
 * into the peer's buffer and busy-polls its own start_buf until the
 * peer's write arrives (poll_buf), ping-ponging cb->count times.
 * Per-iteration post/poll cycle counts are accumulated for the first
 * cycle_iters iterations and reported with the wall-clock delta.
 * NOTE(review): lines are missing from this extract (scnt/ccnt/rcnt
 * init, kfree error-path labels, rcnt/ccnt increments, loop closers);
 * comments annotate only the visible statements.
 */
1209 static void wlat_test(struct krping_cb *cb)
1211 int ccnt, scnt, rcnt;
1212 int iters=cb->count;
/* poll_buf is written by the REMOTE peer; must be volatile. */
1213 volatile char *poll_buf = (char *) cb->start_buf;
1214 char *buf = (char *)cb->rdma_buf;
1215 struct timeval start_tv, stop_tv;
1216 cycles_t *post_cycles_start, *post_cycles_stop;
1217 cycles_t *poll_cycles_start, *poll_cycles_stop;
1218 cycles_t *last_poll_cycles_start;
1219 cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
1221 int cycle_iters = 1000;
/* Five instrumentation arrays, one slot per measured iteration. */
1227 post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1228 if (!post_cycles_start) {
1229 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1232 post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1233 if (!post_cycles_stop) {
1234 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1237 poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1238 if (!poll_cycles_start) {
1239 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1242 poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1243 if (!poll_cycles_stop) {
1244 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1247 last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
1249 if (!last_poll_cycles_start) {
1250 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
/* One reusable WRITE WR aimed at the peer's advertised buffer. */
1253 cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
1254 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
1255 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
1256 cb->rdma_sq_wr.sg_list->length = cb->size;
1258 if (cycle_iters > iters)
1259 cycle_iters = iters;
1260 microtime(&start_tv);
1262 while (scnt < iters || ccnt < iters || rcnt < iters) {
1264 /* Wait till buffer changes. */
/* Server waits first; client sends the first write (scnt<1 check). */
1265 if (rcnt < iters && !(scnt < 1 && !cb->server)) {
1267 while (*poll_buf != (char)rcnt) {
1268 if (cb->state == ERROR) {
1270 "state = ERROR, bailing\n");
1277 struct ib_send_wr *bad_wr;
/* Stamp the next sequence byte and time the post. */
1279 *buf = (char)scnt+1;
1280 if (scnt < cycle_iters)
1281 post_cycles_start[scnt] = get_cycles();
1282 if (ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr)) {
1284 "Couldn't post send: scnt=%d\n",
1288 if (scnt < cycle_iters)
1289 post_cycles_stop[scnt] = get_cycles();
/* Time the CQ poll for the send completion. */
1297 if (ccnt < cycle_iters)
1298 poll_cycles_start[ccnt] = get_cycles();
1300 if (ccnt < cycle_iters)
1301 last_poll_cycles_start[ccnt] =
1303 ne = ib_poll_cq(cb->cq, 1, &wc);
1305 if (ccnt < cycle_iters)
1306 poll_cycles_stop[ccnt] = get_cycles();
1310 PRINTF(cb, "poll CQ failed %d\n", ne);
1313 if (wc.status != IB_WC_SUCCESS) {
1315 "Completion wth error at %s:\n",
1316 cb->server ? "server" : "client");
1318 "Failed status %d: wr_id %d\n",
1319 wc.status, (int) wc.wr_id);
1321 "scnt=%d, rcnt=%d, ccnt=%d\n",
1327 microtime(&stop_tv);
1329 if (stop_tv.tv_usec < start_tv.tv_usec) {
1330 stop_tv.tv_usec += 1000000;
1331 stop_tv.tv_sec -= 1;
/* Aggregate the per-iteration cycle samples for the report. */
1334 for (i=0; i < cycle_iters; i++) {
1335 sum_post += post_cycles_stop[i] - post_cycles_start[i];
1336 sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
1337 sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
1340 "delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
1341 " sum_post %llu sum_poll %llu sum_last_poll %llu\n",
1342 (unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
1343 (unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
1344 scnt, cb->size, cycle_iters,
1345 (unsigned long long)sum_post, (unsigned long long)sum_poll,
1346 (unsigned long long)sum_last_poll);
1347 kfree(post_cycles_start);
1348 kfree(post_cycles_stop);
1349 kfree(poll_cycles_start);
1350 kfree(poll_cycles_stop);
1351 kfree(last_poll_cycles_start);
/*
 * bw_test - RDMA WRITE bandwidth test.
 *
 * Posts IB_WR_RDMA_WRITE work requests of cb->size bytes, keeping up to
 * cb->txdepth outstanding, until 'iters' (== cb->count) sends and
 * completions are done.  The first cycle_iters iterations also sample
 * get_cycles() around each post and each poll; the whole run is timed
 * with microtime().  One summary line is printed and the five kmalloc'd
 * cycle arrays are freed.
 *
 * NOTE(review): this extract is missing intermediate source lines (the
 * embedded original line numbers jump, e.g. 1359 -> 1361), so error-exit
 * paths and some declarations (wc, ne, i, scnt/ccnt initialization) are
 * not visible here.  Code lines below are preserved byte-for-byte.
 */
1351 static void bw_test(struct krping_cb *cb)
1353 int ccnt, scnt, rcnt;
1354 int iters=cb->count;
1355 struct timeval start_tv, stop_tv;
1356 cycles_t *post_cycles_start, *post_cycles_stop;
1357 cycles_t *poll_cycles_start, *poll_cycles_stop;
1358 cycles_t *last_poll_cycles_start;
1359 cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
1361 int cycle_iters = 1000;
/* Per-iteration cycle-count sample arrays; all five must be freed below. */
1367 post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1368 if (!post_cycles_start) {
1369 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1372 post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1373 if (!post_cycles_stop) {
1374 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1377 poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1378 if (!poll_cycles_start) {
1379 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1382 poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1383 if (!poll_cycles_stop) {
1384 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
1387 last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
1389 if (!last_poll_cycles_start) {
1390 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
/* Reuse the control block's single RDMA WR for every write. */
1393 cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
1394 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
1395 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
1396 cb->rdma_sq_wr.sg_list->length = cb->size;
1398 if (cycle_iters > iters)
1399 cycle_iters = iters;
1400 microtime(&start_tv);
/*
 * NOTE(review): scnt/ccnt are read here but their zero-initialization is
 * on source lines not visible in this extract — confirm against the
 * full file.
 */
1401 while (scnt < iters || ccnt < iters) {
1403 while (scnt < iters && scnt - ccnt < cb->txdepth) {
1404 struct ib_send_wr *bad_wr;
1406 if (scnt < cycle_iters)
1407 post_cycles_start[scnt] = get_cycles();
1408 if (ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr)) {
1410 "Couldn't post send: scnt=%d\n",
1414 if (scnt < cycle_iters)
1415 post_cycles_stop[scnt] = get_cycles();
1423 if (ccnt < cycle_iters)
1424 poll_cycles_start[ccnt] = get_cycles();
1426 if (ccnt < cycle_iters)
1427 last_poll_cycles_start[ccnt] =
1429 ne = ib_poll_cq(cb->cq, 1, &wc);
1431 if (ccnt < cycle_iters)
1432 poll_cycles_stop[ccnt] = get_cycles();
1436 PRINTF(cb, "poll CQ failed %d\n", ne);
1439 if (wc.status != IB_WC_SUCCESS) {
/* NOTE(review): pre-existing typo "wth" in the message below; left as-is. */
1441 "Completion wth error at %s:\n",
1442 cb->server ? "server" : "client");
1444 "Failed status %d: wr_id %d\n",
1445 wc.status, (int) wc.wr_id);
1450 microtime(&stop_tv);
/* Normalize the timeval subtraction (borrow from seconds). */
1452 if (stop_tv.tv_usec < start_tv.tv_usec) {
1453 stop_tv.tv_usec += 1000000;
1454 stop_tv.tv_sec -= 1;
1457 for (i=0; i < cycle_iters; i++) {
1458 sum_post += post_cycles_stop[i] - post_cycles_start[i];
1459 sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
1460 sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
1463 "delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
1464 " sum_post %llu sum_poll %llu sum_last_poll %llu\n",
1465 (unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
1466 (unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
1467 scnt, cb->size, cycle_iters,
1468 (unsigned long long)sum_post, (unsigned long long)sum_poll,
1469 (unsigned long long)sum_last_poll);
1470 kfree(post_cycles_start);
1471 kfree(post_cycles_stop);
1472 kfree(poll_cycles_start);
1473 kfree(poll_cycles_stop);
1474 kfree(last_poll_cycles_start);
/*
 * krping_rlat_test_server - server side of the read-latency test.
 *
 * Spins until the client's STAG/TO/Len advertisement arrives, sends the
 * server's own STAG/TO/Len back, busy-polls for the send completion,
 * then blocks until the connection errors out (client disconnect).
 *
 * NOTE(review): intermediate source lines are missing from this extract;
 * code lines below are preserved byte-for-byte.
 */
1477 static void krping_rlat_test_server(struct krping_cb *cb)
1479 struct ib_send_wr *bad_wr;
1483 /* Spin waiting for client's Start STAG/TO/Len */
1484 while (cb->state < RDMA_READ_ADV) {
1485 krping_cq_event_handler(cb->cq, cb);
1488 /* Send STAG/TO/Len to client */
1489 krping_format_send(cb, cb->start_dma_addr);
1490 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1492 PRINTF(cb, "post send error %d\n", ret);
1496 /* Spin waiting for send completion */
/*
 * NOTE(review): operator precedence makes this ret = (ib_poll_cq(...) == 0),
 * so ret ends up 0/1 rather than the poll return value; likely intended
 * as ((ret = ib_poll_cq(...)) == 0).  Flagged only — not changed here.
 */
1497 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1499 PRINTF(cb, "poll error %d\n", ret);
/* NOTE(review): pre-existing typo "completiong" in message; left as-is. */
1503 PRINTF(cb, "send completiong error %d\n", wc.status);
1506 wait_event_interruptible(cb->sem, cb->state == ERROR);
/*
 * krping_wlat_test_server - server side of the write-latency test.
 *
 * Same handshake as the rlat server: wait for the client's STAG/TO/Len,
 * advertise our own, busy-poll the send completion, then wait for
 * disconnect (ERROR state).
 *
 * NOTE(review): intermediate source lines are missing from this extract;
 * code lines below are preserved byte-for-byte.
 */
1509 static void krping_wlat_test_server(struct krping_cb *cb)
1511 struct ib_send_wr *bad_wr;
1515 /* Spin waiting for client's Start STAG/TO/Len */
1516 while (cb->state < RDMA_READ_ADV) {
1517 krping_cq_event_handler(cb->cq, cb);
1520 /* Send STAG/TO/Len to client */
1521 krping_format_send(cb, cb->start_dma_addr);
1522 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1524 PRINTF(cb, "post send error %d\n", ret);
1528 /* Spin waiting for send completion */
/*
 * NOTE(review): precedence bug candidate — ret = (ib_poll_cq(...) == 0);
 * same pattern as the other *_test_server functions.
 */
1529 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1531 PRINTF(cb, "poll error %d\n", ret);
/* NOTE(review): pre-existing typo "completiong" in message; left as-is. */
1535 PRINTF(cb, "send completiong error %d\n", wc.status);
1540 wait_event_interruptible(cb->sem, cb->state == ERROR);
/*
 * krping_bw_test_server - server side of the bandwidth test.
 *
 * Identical handshake to the latency servers: exchange STAG/TO/Len with
 * the client, busy-poll the send completion, then wait for disconnect.
 *
 * NOTE(review): intermediate source lines are missing from this extract;
 * code lines below are preserved byte-for-byte.
 */
1543 static void krping_bw_test_server(struct krping_cb *cb)
1545 struct ib_send_wr *bad_wr;
1549 /* Spin waiting for client's Start STAG/TO/Len */
1550 while (cb->state < RDMA_READ_ADV) {
1551 krping_cq_event_handler(cb->cq, cb);
1554 /* Send STAG/TO/Len to client */
1555 krping_format_send(cb, cb->start_dma_addr);
1556 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1558 PRINTF(cb, "post send error %d\n", ret);
1562 /* Spin waiting for send completion */
/* NOTE(review): ret = (poll == 0) due to precedence; see sibling servers. */
1563 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1565 PRINTF(cb, "poll error %d\n", ret);
/* NOTE(review): pre-existing typo "completiong" in message; left as-is. */
1569 PRINTF(cb, "send completiong error %d\n", wc.status);
1575 wait_event_interruptible(cb->sem, cb->state == ERROR);
/*
 * fastreg_supported - check the device for fast-register MR support.
 *
 * Queries device attributes (from the child cm_id on the server side,
 * presumably cb->cm_id on the client side — the else-arm of the ?: is on
 * a line not visible in this extract) and tests
 * IB_DEVICE_MEM_MGT_EXTENSIONS in device_cap_flags.  Logs the flags
 * either way; return statements are on lines not visible here.
 */
1578 static int fastreg_supported(struct krping_cb *cb, int server)
1580 struct ib_device *dev = server?cb->child_cm_id->device:
1582 struct ib_device_attr attr;
1585 ret = ib_query_device(dev, &attr);
1587 PRINTF(cb, "ib_query_device failed ret %d\n", ret);
1590 if (!(attr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
1591 PRINTF(cb, "Fastreg not supported - device_cap_flags 0x%llx\n",
1592 (unsigned long long)attr.device_cap_flags);
1595 DEBUG_LOG(cb, "Fastreg supported - device_cap_flags 0x%jx\n",
1596 (uintmax_t)attr.device_cap_flags);
1600 static int krping_bind_server(struct krping_cb *cb)
1602 struct sockaddr_in sin;
1605 memset(&sin, 0, sizeof(sin));
1606 sin.sin_len = sizeof sin;
1607 sin.sin_family = AF_INET;
1608 sin.sin_addr.s_addr = cb->addr.s_addr;
1609 sin.sin_port = cb->port;
1611 ret = rdma_bind_addr(cb->cm_id, (struct sockaddr *) &sin);
1613 PRINTF(cb, "rdma_bind_addr error %d\n", ret);
1616 DEBUG_LOG(cb, "rdma_bind_addr successful\n");
1618 DEBUG_LOG(cb, "rdma_listen\n");
1619 ret = rdma_listen(cb->cm_id, 3);
1621 PRINTF(cb, "rdma_listen failed: %d\n", ret);
1625 wait_event_interruptible(cb->sem, cb->state >= CONNECT_REQUEST);
1626 if (cb->state != CONNECT_REQUEST) {
1627 PRINTF(cb, "wait for CONNECT_REQUEST state %d\n",
1632 if (cb->mem == FASTREG && !fastreg_supported(cb, 1))
1639 * sq-depth worth of fastreg + 0B read-inv pairs, reposting them as the reads
1641 * NOTE: every 9 seconds we sleep for 1 second to keep the kernel happy.
/*
 * krping_fr_test5 - pipelined fastreg + 0B RDMA-read-with-invalidate test.
 *
 * Allocates 'depth' (txdepth/2) sets of {fast-reg page list, fast-reg MR,
 * data buffer, DMA mapping}, chains a FAST_REG_MR WR to a
 * RDMA_READ_WITH_INV WR per slot, and posts all of them.  As each
 * signaled read completes, the slot's MR key is bumped and the pair is
 * reposted, until cb->count IOs are done (clients) or the state goes to
 * ERROR.  Every ~9 seconds it sleeps 1 second (per the comment preceding
 * this function in the file).  Afterwards it drains the CQ and tears
 * everything down.
 *
 * NOTE(review): intermediate source lines are missing from this extract
 * (declarations of mr/sgl/buf/scnt/count/key/etc., error/goto paths,
 * closing braces); code lines below are preserved byte-for-byte.
 */
1643 static void krping_fr_test5(struct krping_cb *cb)
1645 struct ib_fast_reg_page_list **pl;
1646 struct ib_send_wr *fr, *read, *bad;
1652 dma_addr_t *dma_addr;
/* Number of pages needed to cover cb->size. */
1655 int plen = (((cb->size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
/* Each slot consumes two SQ WRs (fastreg + read), hence txdepth/2. */
1659 int depth = cb->txdepth >> 1;
1662 PRINTF(cb, "txdepth must be > 1 for this test!\n");
1666 pl = kzalloc(sizeof *pl * depth, GFP_KERNEL);
1667 DEBUG_LOG(cb, "%s pl %p size %zu\n", __func__, pl, sizeof *pl * depth);
1668 mr = kzalloc(sizeof *mr * depth, GFP_KERNEL);
1669 DEBUG_LOG(cb, "%s mr %p size %zu\n", __func__, mr, sizeof *mr * depth);
1670 fr = kzalloc(sizeof *fr * depth, GFP_KERNEL);
1671 DEBUG_LOG(cb, "%s fr %p size %zu\n", __func__, fr, sizeof *fr * depth);
1672 sgl = kzalloc(sizeof *sgl * depth, GFP_KERNEL);
1673 DEBUG_LOG(cb, "%s sgl %p size %zu\n", __func__, sgl, sizeof *sgl * depth);
1674 read = kzalloc(sizeof *read * depth, GFP_KERNEL);
1675 DEBUG_LOG(cb, "%s read %p size %zu\n", __func__, read, sizeof *read * depth);
1676 buf = kzalloc(sizeof *buf * depth, GFP_KERNEL);
1677 DEBUG_LOG(cb, "%s buf %p size %zu\n", __func__, buf, sizeof *buf * depth);
1678 dma_addr = kzalloc(sizeof *dma_addr * depth, GFP_KERNEL);
1679 DEBUG_LOG(cb, "%s dma_addr %p size %zu\n", __func__, dma_addr, sizeof *dma_addr * depth);
1680 if (!pl || !mr || !fr || !read || !sgl || !buf || !dma_addr) {
1681 PRINTF(cb, "kzalloc failed\n");
/* Per-slot setup: page list, MR, buffer, DMA map, and the two WRs. */
1685 for (scnt = 0; scnt < depth; scnt++) {
1686 pl[scnt] = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
1687 if (IS_ERR(pl[scnt])) {
1688 PRINTF(cb, "alloc_fr_page_list failed %ld\n",
1692 DEBUG_LOG(cb, "%s pl[%u] %p\n", __func__, scnt, pl[scnt]);
1694 mr[scnt] = ib_alloc_fast_reg_mr(cb->pd, plen);
1695 if (IS_ERR(mr[scnt])) {
1696 PRINTF(cb, "alloc_fr failed %ld\n",
1700 DEBUG_LOG(cb, "%s mr[%u] %p\n", __func__, scnt, mr[scnt]);
1701 ib_update_fast_reg_key(mr[scnt], ++key);
1703 buf[scnt] = kmalloc(cb->size, GFP_KERNEL);
1705 PRINTF(cb, "kmalloc failed\n");
1709 DEBUG_LOG(cb, "%s buf[%u] %p\n", __func__, scnt, buf[scnt]);
1710 dma_addr[scnt] = ib_dma_map_single(cb->pd->device,
1711 buf[scnt], cb->size,
1713 if (dma_mapping_error(cb->pd->device->dma_device,
1715 PRINTF(cb, "dma_map failed\n");
1719 DEBUG_LOG(cb, "%s dma_addr[%u] %p\n", __func__, scnt, (void *)dma_addr[scnt]);
1720 for (i=0; i<plen; i++) {
1721 pl[scnt]->page_list[i] = ((unsigned long)dma_addr[scnt] & PAGE_MASK) + (i * PAGE_SIZE);
1722 DEBUG_LOG(cb, "%s pl[%u]->page_list[%u] 0x%jx\n",
1723 __func__, scnt, i, (uintmax_t)pl[scnt]->page_list[i]);
/*
 * The SGE uses the fast-reg MR's rkey as lkey (rkey == lkey for these
 * MRs) and the kernel VA of the buffer as iova — matching iova_start
 * below.
 */
1726 sgl[scnt].lkey = mr[scnt]->rkey;
1727 sgl[scnt].length = cb->size;
1728 sgl[scnt].addr = (u64)buf[scnt];
1729 DEBUG_LOG(cb, "%s sgl[%u].lkey 0x%x length %u addr 0x%jx\n",
1730 __func__, scnt, sgl[scnt].lkey, sgl[scnt].length,
1731 (uintmax_t)sgl[scnt].addr);
/* Unsignaled fastreg chained to a signaled read-with-invalidate. */
1733 fr[scnt].opcode = IB_WR_FAST_REG_MR;
1734 fr[scnt].wr_id = scnt;
1735 fr[scnt].send_flags = 0;
1736 fr[scnt].wr.fast_reg.page_shift = PAGE_SHIFT;
1737 fr[scnt].wr.fast_reg.length = cb->size;
1738 fr[scnt].wr.fast_reg.page_list = pl[scnt];
1739 fr[scnt].wr.fast_reg.page_list_len = plen;
1740 fr[scnt].wr.fast_reg.iova_start = (u64)buf[scnt];
1741 fr[scnt].wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
1742 fr[scnt].wr.fast_reg.rkey = mr[scnt]->rkey;
1743 fr[scnt].next = &read[scnt];
1744 read[scnt].opcode = IB_WR_RDMA_READ_WITH_INV;
1745 read[scnt].wr_id = scnt;
1746 read[scnt].send_flags = IB_SEND_SIGNALED;
1747 read[scnt].wr.rdma.rkey = cb->remote_rkey;
1748 read[scnt].wr.rdma.remote_addr = cb->remote_addr;
1749 read[scnt].num_sge = 1;
1750 read[scnt].sg_list = &sgl[scnt];
1751 ret = ib_post_send(cb->qp, &fr[scnt], &bad);
1753 PRINTF(cb, "ib_post_send failed %d\n", ret);
1758 start = time_uptime;
1759 DEBUG_LOG(cb, "%s starting IO.\n", __func__);
/* Servers run until ERROR; clients stop after cb->count IOs (0 == forever). */
1760 while (!cb->count || cb->server || count < cb->count) {
1761 if ((time_uptime - start) >= 9) {
1762 DEBUG_LOG(cb, "%s pausing 1 tick! count %u\n", __func__,
1764 wait_event_interruptible_timeout(cb->sem,
1767 if (cb->state == ERROR)
1769 start = time_uptime;
1772 ret = ib_poll_cq(cb->cq, 1, &wc);
1774 PRINTF(cb, "ib_poll_cq failed %d\n",
1781 "completion error %u wr_id %ju "
1782 "opcode %d\n", wc.status,
1783 (uintmax_t)wc.wr_id, wc.opcode);
1787 if (count == cb->count)
/* Re-key the MR and repost the fastreg+read pair for this slot. */
1789 ib_update_fast_reg_key(mr[wc.wr_id], ++key);
1790 fr[wc.wr_id].wr.fast_reg.rkey =
1792 sgl[wc.wr_id].lkey = mr[wc.wr_id]->rkey;
1793 ret = ib_post_send(cb->qp, &fr[wc.wr_id], &bad);
1796 "ib_post_send failed %d\n", ret);
1799 } else if (krping_sigpending()) {
1800 PRINTF(cb, "signal!\n");
1805 DEBUG_LOG(cb, "%s done!\n", __func__);
/* Let in-flight WRs finish, then drain remaining completions. */
1807 DEBUG_LOG(cb, "sleeping 1 second\n");
1808 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
1809 DEBUG_LOG(cb, "draining the cq...\n");
1811 ret = ib_poll_cq(cb->cq, 1, &wc);
1813 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
1818 PRINTF(cb, "completion error %u "
1819 "opcode %u\n", wc.status, wc.opcode);
/* Teardown: MRs, then DMA unmapping + buffers, then page lists. */
1824 DEBUG_LOG(cb, "destroying fr mrs!\n");
1825 for (scnt = 0; scnt < depth; scnt++) {
1827 ib_dereg_mr(mr[scnt]);
1828 DEBUG_LOG(cb, "%s dereg mr %p\n", __func__, mr[scnt]);
1831 DEBUG_LOG(cb, "unmapping/freeing bufs!\n");
1832 for (scnt = 0; scnt < depth; scnt++) {
1834 dma_unmap_single(cb->pd->device->dma_device,
1835 dma_addr[scnt], cb->size,
1838 DEBUG_LOG(cb, "%s unmap/free buf %p dma_addr %p\n", __func__, buf[scnt], (void *)dma_addr[scnt]);
1841 DEBUG_LOG(cb, "destroying fr page lists!\n");
1842 for (scnt = 0; scnt < depth; scnt++) {
1844 DEBUG_LOG(cb, "%s free pl %p\n", __func__, pl[scnt]);
1845 ib_free_fast_reg_page_list(pl[scnt]);
/*
 * krping_fr_test_server - passive server for the client-driven fastreg
 * tests: does nothing but wait for the connection to error out
 * (disconnect).
 */
1864 static void krping_fr_test_server(struct krping_cb *cb)
1866 DEBUG_LOG(cb, "%s waiting for disconnect...\n", __func__);
1867 wait_event_interruptible(cb->sem, cb->state == ERROR);
/*
 * krping_fr_test5_server - server side of fr test 5.
 *
 * Exchanges STAG/TO/Len with the client (wait, send, poll completion),
 * then runs krping_fr_test5() itself and waits for disconnect.
 *
 * NOTE(review): intermediate source lines are missing from this extract;
 * code lines below are preserved byte-for-byte.
 */
1870 static void krping_fr_test5_server(struct krping_cb *cb)
1872 struct ib_send_wr *bad_wr;
1876 /* Spin waiting for client's Start STAG/TO/Len */
1877 while (cb->state < RDMA_READ_ADV) {
1878 krping_cq_event_handler(cb->cq, cb);
1880 DEBUG_LOG(cb, "%s client STAG %x TO 0x%jx\n", __func__,
1881 cb->remote_rkey, (uintmax_t)cb->remote_addr);
1883 /* Send STAG/TO/Len to client */
1884 krping_format_send(cb, cb->start_dma_addr);
1885 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1887 PRINTF(cb, "post send error %d\n", ret);
1891 /* Spin waiting for send completion */
/* NOTE(review): ret = (poll == 0) due to precedence; see sibling servers. */
1892 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1894 PRINTF(cb, "poll error %d\n", ret);
/* NOTE(review): pre-existing typo "completiong" in message; left as-is. */
1898 PRINTF(cb, "send completiong error %d\n", wc.status);
1903 krping_fr_test5(cb);
1904 DEBUG_LOG(cb, "%s waiting for disconnect...\n", __func__);
1905 wait_event_interruptible(cb->sem, cb->state == ERROR);
/*
 * krping_fr_test5_client - client side of fr test 5.
 *
 * Advertises STAG/TO/Len to the server, polls the send completion,
 * spins until the server's advertisement arrives, then tail-calls
 * krping_fr_test5() to run the IO loop.
 *
 * NOTE(review): intermediate source lines are missing from this extract;
 * code lines below are preserved byte-for-byte.
 */
1908 static void krping_fr_test5_client(struct krping_cb *cb)
1910 struct ib_send_wr *bad;
1914 cb->state = RDMA_READ_ADV;
1916 /* Send STAG/TO/Len to server */
1917 krping_format_send(cb, cb->start_dma_addr);
1918 if (cb->state == ERROR) {
1919 PRINTF(cb, "krping_format_send failed\n");
1922 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad);
1924 PRINTF(cb, "post send error %d\n", ret);
1928 /* Spin waiting for send completion */
/* NOTE(review): ret = (poll == 0) due to precedence; see server variants. */
1929 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
1931 PRINTF(cb, "poll error %d\n", ret);
1935 PRINTF(cb, "send completion error %d\n", wc.status);
1939 /* Spin waiting for server's Start STAG/TO/Len */
1940 while (cb->state < RDMA_WRITE_ADV) {
1941 krping_cq_event_handler(cb->cq, cb);
1943 DEBUG_LOG(cb, "%s server STAG %x TO 0x%jx\n", __func__, cb->remote_rkey,
1944 (uintmax_t)cb->remote_addr);
/* void tail-call: return of a void expression is valid C. */
1946 return krping_fr_test5(cb);
1950 * sq-depth worth of write + fastreg + inv, reposting them as the invs
1952 * NOTE: every 9 seconds we sleep for 1 second to keep the kernel happy.
1953 * If a count is given, then the last IO will have a bogus lkey in the
1954 * write work request. This reproduces a fw bug where the connection
1955 * will get stuck if a fastreg is processed while the ulptx is failing
/*
 * krping_fr_test6 - pipelined write + fastreg + local-invalidate test.
 *
 * Allocates 'depth' (txdepth/3) sets of {page list, fast-reg MR, buffer,
 * DMA mapping} and, per slot, chains RDMA_WRITE -> FAST_REG_MR ->
 * LOCAL_INV (only the invalidate is signaled).  Completed triples are
 * re-keyed and reposted until cb->count IOs (clients) or ERROR.  Per the
 * comment preceding this function in the file, when a count is given the
 * last IO deliberately uses a bogus lkey (0x00dead) in the write WR to
 * reproduce a firmware bug.  Finishes by draining the CQ and tearing
 * down all resources.
 *
 * NOTE(review): intermediate source lines are missing from this extract
 * (declarations, error/goto paths, closing braces); code lines below are
 * preserved byte-for-byte.
 */
1958 static void krping_fr_test6(struct krping_cb *cb)
1960 struct ib_fast_reg_page_list **pl;
1961 struct ib_send_wr *fr, *write, *inv, *bad;
1967 dma_addr_t *dma_addr;
/* Number of pages needed to cover cb->size. */
1970 int plen = (((cb->size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
1971 unsigned long start;
/* Each slot consumes three SQ WRs (write + fastreg + inv), hence /3. */
1974 int depth = cb->txdepth / 3;
1977 PRINTF(cb, "txdepth must be > 3 for this test!\n");
1981 pl = kzalloc(sizeof *pl * depth, GFP_KERNEL);
1982 DEBUG_LOG(cb, "%s pl %p size %zu\n", __func__, pl, sizeof *pl * depth);
1984 mr = kzalloc(sizeof *mr * depth, GFP_KERNEL);
1985 DEBUG_LOG(cb, "%s mr %p size %zu\n", __func__, mr, sizeof *mr * depth);
1987 fr = kzalloc(sizeof *fr * depth, GFP_KERNEL);
1988 DEBUG_LOG(cb, "%s fr %p size %zu\n", __func__, fr, sizeof *fr * depth);
1990 sgl = kzalloc(sizeof *sgl * depth, GFP_KERNEL);
1991 DEBUG_LOG(cb, "%s sgl %p size %zu\n", __func__, sgl, sizeof *sgl * depth);
1993 write = kzalloc(sizeof *write * depth, GFP_KERNEL);
/* NOTE(review): pre-existing copy-paste — log says "read" for the write array. */
1994 DEBUG_LOG(cb, "%s read %p size %zu\n", __func__, write, sizeof *write * depth);
1996 inv = kzalloc(sizeof *inv * depth, GFP_KERNEL);
1997 DEBUG_LOG(cb, "%s inv %p size %zu\n", __func__, inv, sizeof *inv * depth);
1999 buf = kzalloc(sizeof *buf * depth, GFP_KERNEL);
2000 DEBUG_LOG(cb, "%s buf %p size %zu\n", __func__, buf, sizeof *buf * depth);
2002 dma_addr = kzalloc(sizeof *dma_addr * depth, GFP_KERNEL);
2003 DEBUG_LOG(cb, "%s dma_addr %p size %zu\n", __func__, dma_addr, sizeof *dma_addr * depth);
/* NOTE(review): 'inv' is not included in this null check — confirm upstream. */
2005 if (!pl || !mr || !fr || !write || !sgl || !buf || !dma_addr) {
2006 PRINTF(cb, "kzalloc failed\n");
/* Per-slot setup: page list, MR, buffer, DMA map, and the three WRs. */
2010 for (scnt = 0; scnt < depth; scnt++) {
2011 pl[scnt] = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
2012 if (IS_ERR(pl[scnt])) {
2013 PRINTF(cb, "alloc_fr_page_list failed %ld\n",
2017 DEBUG_LOG(cb, "%s pl[%u] %p\n", __func__, scnt, pl[scnt]);
2019 mr[scnt] = ib_alloc_fast_reg_mr(cb->pd, plen);
2020 if (IS_ERR(mr[scnt])) {
2021 PRINTF(cb, "alloc_fr failed %ld\n",
2025 DEBUG_LOG(cb, "%s mr[%u] %p\n", __func__, scnt, mr[scnt]);
2026 ib_update_fast_reg_key(mr[scnt], ++key);
2028 buf[scnt] = kmalloc(cb->size, GFP_KERNEL);
2030 PRINTF(cb, "kmalloc failed\n");
2034 DEBUG_LOG(cb, "%s buf[%u] %p\n", __func__, scnt, buf[scnt]);
2035 dma_addr[scnt] = ib_dma_map_single(cb->pd->device,
2036 buf[scnt], cb->size,
2038 if (dma_mapping_error(cb->pd->device->dma_device,
2040 PRINTF(cb, "dma_map failed\n");
2044 DEBUG_LOG(cb, "%s dma_addr[%u] %p\n", __func__, scnt, (void *)dma_addr[scnt]);
2045 for (i=0; i<plen; i++) {
2046 pl[scnt]->page_list[i] = ((unsigned long)dma_addr[scnt] & PAGE_MASK) + (i * PAGE_SIZE);
2047 DEBUG_LOG(cb, "%s pl[%u]->page_list[%u] 0x%jx\n",
2048 __func__, scnt, i, (uintmax_t)pl[scnt]->page_list[i]);
/* WR 1: RDMA write of cb->size bytes using the shared rdma_sgl. */
2051 write[scnt].opcode = IB_WR_RDMA_WRITE;
2052 write[scnt].wr_id = scnt;
2053 write[scnt].wr.rdma.rkey = cb->remote_rkey;
2054 write[scnt].wr.rdma.remote_addr = cb->remote_addr;
2055 write[scnt].num_sge = 1;
2056 write[scnt].sg_list = &cb->rdma_sgl;
2057 write[scnt].sg_list->length = cb->size;
2058 write[scnt].next = &fr[scnt];
/* WR 2: fastreg of this slot's MR (unsignaled). */
2060 fr[scnt].opcode = IB_WR_FAST_REG_MR;
2061 fr[scnt].wr_id = scnt;
2062 fr[scnt].wr.fast_reg.page_shift = PAGE_SHIFT;
2063 fr[scnt].wr.fast_reg.length = cb->size;
2064 fr[scnt].wr.fast_reg.page_list = pl[scnt];
2065 fr[scnt].wr.fast_reg.page_list_len = plen;
2066 fr[scnt].wr.fast_reg.iova_start = (u64)buf[scnt];
2067 fr[scnt].wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
2068 fr[scnt].wr.fast_reg.rkey = mr[scnt]->rkey;
2069 fr[scnt].next = &inv[scnt];
/* WR 3: local invalidate of the key just registered (signaled). */
2071 inv[scnt].opcode = IB_WR_LOCAL_INV;
2072 inv[scnt].send_flags = IB_SEND_SIGNALED;
2073 inv[scnt].ex.invalidate_rkey = mr[scnt]->rkey;
2075 ret = ib_post_send(cb->qp, &write[scnt], &bad);
2077 PRINTF(cb, "ib_post_send failed %d\n", ret);
2082 start = time_uptime;
2083 DEBUG_LOG(cb, "%s starting IO.\n", __func__);
/* Servers run until ERROR; clients stop after cb->count IOs (0 == forever). */
2084 while (!cb->count || cb->server || count < cb->count) {
2085 if ((time_uptime - start) >= 9) {
2086 DEBUG_LOG(cb, "%s pausing 1 tick! count %u\n", __func__,
2088 wait_event_interruptible_timeout(cb->sem,
2091 if (cb->state == ERROR)
2093 start = time_uptime;
2096 ret = ib_poll_cq(cb->cq, 1, &wc);
2098 PRINTF(cb, "ib_poll_cq failed %d\n",
2105 "completion error %u wr_id %ju "
2106 "opcode %d\n", wc.status,
2107 (uintmax_t)wc.wr_id, wc.opcode);
/* Deliberate fault injection on the last IO (see function header). */
2111 if (count == (cb->count -1))
2112 cb->rdma_sgl.lkey = 0x00dead;
2113 if (count == cb->count)
/* Re-key and repost the whole write+fastreg+inv triple. */
2115 ib_update_fast_reg_key(mr[wc.wr_id], ++key);
2116 fr[wc.wr_id].wr.fast_reg.rkey =
2118 inv[wc.wr_id].ex.invalidate_rkey =
2120 ret = ib_post_send(cb->qp, &write[wc.wr_id], &bad);
2123 "ib_post_send failed %d\n", ret);
2126 } else if (krping_sigpending()){
2127 PRINTF(cb, "signal!\n");
2132 DEBUG_LOG(cb, "%s done!\n", __func__);
/* Let in-flight WRs finish, then drain remaining completions. */
2134 DEBUG_LOG(cb, "sleeping 1 second\n");
2135 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
2136 DEBUG_LOG(cb, "draining the cq...\n");
2138 ret = ib_poll_cq(cb->cq, 1, &wc);
2140 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
2145 PRINTF(cb, "completion error %u "
2146 "opcode %u\n", wc.status, wc.opcode);
/* Teardown: MRs, then DMA unmapping + buffers, then page lists. */
2151 DEBUG_LOG(cb, "destroying fr mrs!\n");
2152 for (scnt = 0; scnt < depth; scnt++) {
2154 ib_dereg_mr(mr[scnt]);
2155 DEBUG_LOG(cb, "%s dereg mr %p\n", __func__, mr[scnt]);
2158 DEBUG_LOG(cb, "unmapping/freeing bufs!\n");
2159 for (scnt = 0; scnt < depth; scnt++) {
2161 dma_unmap_single(cb->pd->device->dma_device,
2162 dma_addr[scnt], cb->size,
2165 DEBUG_LOG(cb, "%s unmap/free buf %p dma_addr %p\n", __func__, buf[scnt], (void *)dma_addr[scnt]);
2168 DEBUG_LOG(cb, "destroying fr page lists!\n");
2169 for (scnt = 0; scnt < depth; scnt++) {
2171 DEBUG_LOG(cb, "%s free pl %p\n", __func__, pl[scnt]);
2172 ib_free_fast_reg_page_list(pl[scnt]);
/*
 * krping_fr_test6_server - server side of fr test 6.
 *
 * Exchanges STAG/TO/Len with the client, polls the send completion,
 * runs krping_fr_test6() itself, then waits for disconnect.
 *
 * NOTE(review): intermediate source lines are missing from this extract;
 * code lines below are preserved byte-for-byte.
 */
2194 static void krping_fr_test6_server(struct krping_cb *cb)
2196 struct ib_send_wr *bad_wr;
2200 /* Spin waiting for client's Start STAG/TO/Len */
2201 while (cb->state < RDMA_READ_ADV) {
2202 krping_cq_event_handler(cb->cq, cb);
2204 DEBUG_LOG(cb, "%s client STAG %x TO 0x%jx\n", __func__,
2205 cb->remote_rkey, (uintmax_t)cb->remote_addr);
2207 /* Send STAG/TO/Len to client */
2208 krping_format_send(cb, cb->start_dma_addr);
2209 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
2211 PRINTF(cb, "post send error %d\n", ret);
2215 /* Spin waiting for send completion */
/* NOTE(review): ret = (poll == 0) due to precedence; see sibling servers. */
2216 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
2218 PRINTF(cb, "poll error %d\n", ret);
/* NOTE(review): pre-existing typo "completiong" in message; left as-is. */
2222 PRINTF(cb, "send completiong error %d\n", wc.status);
2227 krping_fr_test6(cb);
2228 DEBUG_LOG(cb, "%s waiting for disconnect...\n", __func__);
2229 wait_event_interruptible(cb->sem, cb->state == ERROR);
/*
 * krping_fr_test6_client - client side of fr test 6.
 *
 * Advertises STAG/TO/Len to the server, polls the send completion,
 * spins until the server's advertisement arrives, then tail-calls
 * krping_fr_test6() to run the IO loop.
 *
 * NOTE(review): intermediate source lines are missing from this extract;
 * code lines below are preserved byte-for-byte.
 */
2232 static void krping_fr_test6_client(struct krping_cb *cb)
2234 struct ib_send_wr *bad;
2238 cb->state = RDMA_READ_ADV;
2240 /* Send STAG/TO/Len to server */
2241 krping_format_send(cb, cb->start_dma_addr);
2242 if (cb->state == ERROR) {
2243 PRINTF(cb, "krping_format_send failed\n");
2246 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad);
2248 PRINTF(cb, "post send error %d\n", ret);
2252 /* Spin waiting for send completion */
/* NOTE(review): ret = (poll == 0) due to precedence; see server variants. */
2253 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
2255 PRINTF(cb, "poll error %d\n", ret);
2259 PRINTF(cb, "send completion error %d\n", wc.status);
2263 /* Spin waiting for server's Start STAG/TO/Len */
2264 while (cb->state < RDMA_WRITE_ADV) {
2265 krping_cq_event_handler(cb->cq, cb);
2267 DEBUG_LOG(cb, "%s server STAG %x TO 0x%jx\n", __func__, cb->remote_rkey,
2268 (uintmax_t)cb->remote_addr);
/* void tail-call: return of a void expression is valid C. */
2270 return krping_fr_test6(cb);
/*
 * krping_run_server - top-level server path.
 *
 * Binds/listens/accepts (krping_bind_server), sets up the QP on the
 * child cm_id, sets up buffers, pre-posts the initial receive, accepts
 * the connection, then dispatches to the selected test (wlat/rlat/bw,
 * a numbered fastreg test, or the default ping server).  Afterwards it
 * disconnects the child cm_id, frees buffers, and destroys the child id.
 *
 * NOTE(review): intermediate source lines are missing from this extract
 * (error/goto paths, the test-selection conditions at lines 2306-2310,
 * switch braces); code lines below are preserved byte-for-byte.
 */
2273 static void krping_run_server(struct krping_cb *cb)
2275 struct ib_recv_wr *bad_wr;
2278 ret = krping_bind_server(cb);
2282 ret = krping_setup_qp(cb, cb->child_cm_id);
2284 PRINTF(cb, "setup_qp failed: %d\n", ret);
2288 ret = krping_setup_buffers(cb);
2290 PRINTF(cb, "krping_setup_buffers failed: %d\n", ret);
/* Receive must be posted before accepting, so the first send can land. */
2294 ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
2296 PRINTF(cb, "ib_post_recv failed: %d\n", ret);
2300 ret = krping_accept(cb);
2302 PRINTF(cb, "connect error %d\n", ret);
2307 krping_wlat_test_server(cb);
2309 krping_rlat_test_server(cb);
2311 krping_bw_test_server(cb);
2312 else if (cb->frtest) {
2313 switch (cb->testnum) {
2318 krping_fr_test_server(cb);
2321 krping_fr_test5_server(cb);
2324 krping_fr_test6_server(cb);
2327 PRINTF(cb, "unknown fr test %d\n", cb->testnum);
2332 krping_test_server(cb);
2333 rdma_disconnect(cb->child_cm_id);
2335 krping_free_buffers(cb);
2339 rdma_destroy_id(cb->child_cm_id);
/*
 * krping_test_client - default rdma-ping client loop.
 *
 * For each ping (cb->count iterations, or forever when count == 0):
 * fill start_buf with "rdma-ping-N: " plus a rolling ASCII pattern,
 * advertise it to the server, wait for the server's RDMA read ACK,
 * advertise the rdma buffer, wait for the server's RDMA write to
 * complete, then compare the two buffers and print the ping data
 * (truncated to 128 bytes via strlcpy when verbose).
 *
 * NOTE(review): intermediate source lines are missing from this extract
 * (the declarations of c/start, the pattern-wrap logic around line
 * 2356-2362, error breaks, and the msgbuf declaration); code lines below
 * are preserved byte-for-byte.
 */
2342 static void krping_test_client(struct krping_cb *cb)
2344 int ping, start, cc, i, ret;
2345 struct ib_send_wr *bad_wr;
2349 for (ping = 0; !cb->count || ping < cb->count; ping++) {
2350 cb->state = RDMA_READ_ADV;
2352 /* Put some ascii text in the buffer. */
2353 cc = sprintf(cb->start_buf, "rdma-ping-%d: ", ping);
2354 for (i = cc, c = start; i < cb->size; i++) {
2355 cb->start_buf[i] = c;
2363 cb->start_buf[cb->size - 1] = 0;
2365 krping_format_send(cb, cb->start_dma_addr);
2366 if (cb->state == ERROR) {
2367 PRINTF(cb, "krping_format_send failed\n");
2370 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
2372 PRINTF(cb, "post send error %d\n", ret);
2376 /* Wait for server to ACK */
2377 wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
2378 if (cb->state != RDMA_WRITE_ADV) {
2380 "wait for RDMA_WRITE_ADV state %d\n",
2385 krping_format_send(cb, cb->rdma_dma_addr);
2386 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
2388 PRINTF(cb, "post send error %d\n", ret);
2392 /* Wait for the server to say the RDMA Write is complete. */
2393 wait_event_interruptible(cb->sem,
2394 cb->state >= RDMA_WRITE_COMPLETE);
2395 if (cb->state != RDMA_WRITE_COMPLETE) {
2397 "wait for RDMA_WRITE_COMPLETE state %d\n",
/* Validate: the server's write-back must match what we sent. */
2403 if (memcmp(cb->start_buf, cb->rdma_buf, cb->size)) {
2404 PRINTF(cb, "data mismatch!\n");
/* Cap the log line at 128 bytes of ping data. */
2409 if (strlen(cb->rdma_buf) > 128) {
2412 strlcpy(msgbuf, cb->rdma_buf, sizeof(msgbuf));
2413 PRINTF(cb, "ping data stripped: %s\n",
2416 PRINTF(cb, "ping data: %s\n", cb->rdma_buf);
2419 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
/*
 * krping_rlat_test_client - client side of the read-latency test.
 *
 * Advertises STAG/TO/Len, polls the send completion, waits for the
 * server's advertisement, then runs an inner scope that measures
 * 0-byte RDMA WRITE latency over 100000 iterations (post one write,
 * poll its completion, repeat) and prints the elapsed microseconds.
 *
 * NOTE(review): intermediate source lines are missing from this extract
 * (inner-scope declarations such as wc/ne/sec/usec, the microtime()
 * start/stop calls around the loop, closing braces, and the final
 * rlat_test() call implied by the function's role); code lines below
 * are preserved byte-for-byte.
 */
2424 static void krping_rlat_test_client(struct krping_cb *cb)
2426 struct ib_send_wr *bad_wr;
2430 cb->state = RDMA_READ_ADV;
2432 /* Send STAG/TO/Len to client */
2433 krping_format_send(cb, cb->start_dma_addr);
2434 if (cb->state == ERROR) {
2435 PRINTF(cb, "krping_format_send failed\n");
2438 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
2440 PRINTF(cb, "post send error %d\n", ret);
2444 /* Spin waiting for send completion */
/* NOTE(review): ret = (poll == 0) due to precedence; see server variants. */
2445 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
2447 PRINTF(cb, "poll error %d\n", ret);
2451 PRINTF(cb, "send completion error %d\n", wc.status);
2455 /* Spin waiting for server's Start STAG/TO/Len */
2456 while (cb->state < RDMA_WRITE_ADV) {
2457 krping_cq_event_handler(cb->cq, cb);
/* Inner scope: 0-byte RDMA WRITE latency measurement. */
2463 struct timeval start, stop;
2466 unsigned long long elapsed;
2468 struct ib_send_wr *bad_wr;
/* Zero-length write: num_sge = 0 so no data is transferred. */
2471 cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
2472 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
2473 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
2474 cb->rdma_sq_wr.sg_list->length = 0;
2475 cb->rdma_sq_wr.num_sge = 0;
2478 for (i=0; i < 100000; i++) {
2479 if (ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr)) {
2480 PRINTF(cb, "Couldn't post send\n");
2484 ne = ib_poll_cq(cb->cq, 1, &wc);
2487 PRINTF(cb, "poll CQ failed %d\n", ne);
2490 if (wc.status != IB_WC_SUCCESS) {
/* NOTE(review): pre-existing typo "wth" in the message below; left as-is. */
2491 PRINTF(cb, "Completion wth error at %s:\n",
2492 cb->server ? "server" : "client");
2493 PRINTF(cb, "Failed status %d: wr_id %d\n",
2494 wc.status, (int) wc.wr_id);
/* Normalize the timeval subtraction (borrow from seconds). */
2500 if (stop.tv_usec < start.tv_usec) {
2501 stop.tv_usec += 1000000;
2504 sec = stop.tv_sec - start.tv_sec;
2505 usec = stop.tv_usec - start.tv_usec;
2506 elapsed = sec * 1000000 + usec;
2507 PRINTF(cb, "0B-write-lat iters 100000 usec %llu\n", elapsed);
/*
 * krping_wlat_test_client - client side of the write-latency test.
 *
 * Advertises STAG/TO/Len, polls the send completion, then spins until
 * the server's advertisement arrives.  The actual wlat_test() call is
 * on a line not visible in this extract.
 */
2514 static void krping_wlat_test_client(struct krping_cb *cb)
2516 struct ib_send_wr *bad_wr;
2520 cb->state = RDMA_READ_ADV;
2522 /* Send STAG/TO/Len to client */
2523 krping_format_send(cb, cb->start_dma_addr);
2524 if (cb->state == ERROR) {
2525 PRINTF(cb, "krping_format_send failed\n");
2528 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
2530 PRINTF(cb, "post send error %d\n", ret);
2534 /* Spin waiting for send completion */
/* NOTE(review): ret = (poll == 0) due to precedence; see server variants. */
2535 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
2537 PRINTF(cb, "poll error %d\n", ret);
2541 PRINTF(cb, "send completion error %d\n", wc.status);
2545 /* Spin waiting for server's Start STAG/TO/Len */
2546 while (cb->state < RDMA_WRITE_ADV) {
2547 krping_cq_event_handler(cb->cq, cb);
/*
 * krping_bw_test_client - client side of the bandwidth test.
 *
 * Advertises STAG/TO/Len, polls the send completion, then spins until
 * the server's advertisement arrives.  The actual bw_test() call is on
 * a line not visible in this extract.
 */
2553 static void krping_bw_test_client(struct krping_cb *cb)
2555 struct ib_send_wr *bad_wr;
2559 cb->state = RDMA_READ_ADV;
2561 /* Send STAG/TO/Len to client */
2562 krping_format_send(cb, cb->start_dma_addr);
2563 if (cb->state == ERROR) {
2564 PRINTF(cb, "krping_format_send failed\n");
2567 ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
2569 PRINTF(cb, "post send error %d\n", ret);
2573 /* Spin waiting for send completion */
/* NOTE(review): ret = (poll == 0) due to precedence; see server variants. */
2574 while ((ret = ib_poll_cq(cb->cq, 1, &wc) == 0));
2576 PRINTF(cb, "poll error %d\n", ret);
2580 PRINTF(cb, "send completion error %d\n", wc.status);
2584 /* Spin waiting for server's Start STAG/TO/Len */
2585 while (cb->state < RDMA_WRITE_ADV) {
2586 krping_cq_event_handler(cb->cq, cb);
2594 * fastreg 2 valid different mrs and verify the completions.
/*
 * krping_fr_test1 - fastreg two distinct MRs and verify both completions.
 *
 * Builds one FAST_REG_MR work request, posts it once with mr1's rkey and
 * once with mr2's rkey (both signaled), then polls until two completions
 * are observed, drains the CQ, and deregisters/frees everything.
 *
 * NOTE(review): intermediate source lines are missing from this extract
 * (IS_ERR checks, count increment, error/goto labels, closing braces);
 * code lines below are preserved byte-for-byte.
 */
2596 static void krping_fr_test1(struct krping_cb *cb)
2598 struct ib_fast_reg_page_list *pl;
2599 struct ib_send_wr fr, *bad;
2601 struct ib_mr *mr1, *mr2;
2604 int size = cb->size;
/* Number of pages needed to cover 'size'. */
2605 int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
2608 pl = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
2610 PRINTF(cb, "ib_alloc_fast_reg_page_list failed %ld\n", PTR_ERR(pl));
2614 mr1 = ib_alloc_fast_reg_mr(cb->pd, plen);
/*
 * NOTE(review): copy-paste bug — this message reports PTR_ERR(pl), not
 * PTR_ERR(mr1); same on the mr2 failure below.  Flagged only.
 */
2616 PRINTF(cb, "ib_alloc_fast_reg_mr failed %ld\n", PTR_ERR(pl));
2619 mr2 = ib_alloc_fast_reg_mr(cb->pd, plen);
2621 PRINTF(cb, "ib_alloc_fast_reg_mr failed %ld\n", PTR_ERR(pl));
2626 for (i=0; i<plen; i++)
2627 pl->page_list[i] = i * PAGE_SIZE;
2629 memset(&fr, 0, sizeof fr);
2630 fr.opcode = IB_WR_FAST_REG_MR;
2632 fr.wr.fast_reg.page_shift = PAGE_SHIFT;
2633 fr.wr.fast_reg.length = size;
2634 fr.wr.fast_reg.page_list = pl;
2635 fr.wr.fast_reg.page_list_len = plen;
2636 fr.wr.fast_reg.iova_start = 0;
2637 fr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
2638 fr.send_flags = IB_SEND_SIGNALED;
/* First post with mr1's key... */
2639 fr.wr.fast_reg.rkey = mr1->rkey;
2640 DEBUG_LOG(cb, "%s fr1: stag 0x%x plen %u size %u depth %u\n", __func__, fr.wr.fast_reg.rkey, plen, cb->size, cb->txdepth);
2641 ret = ib_post_send(cb->qp, &fr, &bad);
2643 PRINTF(cb, "ib_post_send failed %d\n", ret);
/* ...then with mr2's key — both should complete successfully. */
2646 fr.wr.fast_reg.rkey = mr2->rkey;
2647 DEBUG_LOG(cb, "%s fr2: stag 0x%x plen %u size %u depth %u\n", __func__, fr.wr.fast_reg.rkey, plen, cb->size, cb->txdepth);
2648 ret = ib_post_send(cb->qp, &fr, &bad);
2650 PRINTF(cb, "ib_post_send failed %d\n", ret);
2654 DEBUG_LOG(cb, "sleeping 1 second\n");
2655 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
2657 ret = ib_poll_cq(cb->cq, 1, &wc);
2659 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
2663 DEBUG_LOG(cb, "completion status %u wr %s\n",
2664 wc.status, wc.wr_id == 1 ? "fr" : "inv");
2666 } else if (krping_sigpending()) {
2667 PRINTF(cb, "signal!\n");
2671 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
2672 } while (count != 2);
2674 DEBUG_LOG(cb, "sleeping 1 second\n");
2675 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
2676 DEBUG_LOG(cb, "draining the cq...\n");
2678 ret = ib_poll_cq(cb->cq, 1, &wc);
2680 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
2684 PRINTF(cb, "completion %u opcode %u\n", wc.status, wc.opcode);
2687 DEBUG_LOG(cb, "destroying fr mr2!\n");
2691 DEBUG_LOG(cb, "destroying fr mr1!\n");
2694 DEBUG_LOG(cb, "destroying fr page list!\n");
2695 ib_free_fast_reg_page_list(pl);
2696 DEBUG_LOG(cb, "%s done!\n", __func__);
2700 * fastreg the same mr twice, 2nd one should produce error cqe.
/*
 * krping_fr_test2 - fastreg the same MR twice; the second post should
 * produce an error CQE (per the comment preceding this function in the
 * file).  Posts the identical FAST_REG_MR WR twice without changing the
 * rkey, collects two completions, drains the CQ, and cleans up.
 *
 * NOTE(review): intermediate source lines are missing from this extract
 * (IS_ERR checks, count increment, error/goto labels, closing braces);
 * code lines below are preserved byte-for-byte.
 */
2702 static void krping_fr_test2(struct krping_cb *cb)
2704 struct ib_fast_reg_page_list *pl;
2705 struct ib_send_wr fr, *bad;
2710 int size = cb->size;
/* Number of pages needed to cover 'size'. */
2711 int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
2714 pl = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
2716 PRINTF(cb, "ib_alloc_fast_reg_page_list failed %ld\n", PTR_ERR(pl));
2720 mr1 = ib_alloc_fast_reg_mr(cb->pd, plen);
/*
 * NOTE(review): copy-paste bug — this message reports PTR_ERR(pl), not
 * PTR_ERR(mr1).  Flagged only.
 */
2722 PRINTF(cb, "ib_alloc_fast_reg_mr failed %ld\n", PTR_ERR(pl));
2726 for (i=0; i<plen; i++)
2727 pl->page_list[i] = i * PAGE_SIZE;
2729 memset(&fr, 0, sizeof fr);
2730 fr.opcode = IB_WR_FAST_REG_MR;
2732 fr.wr.fast_reg.page_shift = PAGE_SHIFT;
2733 fr.wr.fast_reg.length = size;
2734 fr.wr.fast_reg.page_list = pl;
2735 fr.wr.fast_reg.page_list_len = plen;
2736 fr.wr.fast_reg.iova_start = 0;
2737 fr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
2738 fr.send_flags = IB_SEND_SIGNALED;
2739 fr.wr.fast_reg.rkey = mr1->rkey;
2740 DEBUG_LOG(cb, "%s fr1: stag 0x%x plen %u size %u depth %u\n", __func__, fr.wr.fast_reg.rkey, plen, cb->size, cb->txdepth);
2741 ret = ib_post_send(cb->qp, &fr, &bad);
2743 PRINTF(cb, "ib_post_send failed %d\n", ret);
/* Second post with the SAME rkey — this one is expected to fail. */
2746 DEBUG_LOG(cb, "%s fr2: stag 0x%x plen %u size %u depth %u\n", __func__, fr.wr.fast_reg.rkey, plen, cb->size, cb->txdepth);
2747 ret = ib_post_send(cb->qp, &fr, &bad);
2749 PRINTF(cb, "ib_post_send failed %d\n", ret);
2753 DEBUG_LOG(cb, "sleeping 1 second\n");
2754 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
2756 ret = ib_poll_cq(cb->cq, 1, &wc);
2758 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
2762 DEBUG_LOG(cb, "completion status %u wr %s\n",
2763 wc.status, wc.wr_id == 1 ? "fr" : "inv");
2765 } else if (krping_sigpending()) {
2766 PRINTF(cb, "signal!\n");
2769 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
2770 } while (count != 2);
2772 DEBUG_LOG(cb, "sleeping 1 second\n");
2773 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
2774 DEBUG_LOG(cb, "draining the cq...\n");
2776 ret = ib_poll_cq(cb->cq, 1, &wc);
2778 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
2782 PRINTF(cb, "completion %u opcode %u\n", wc.status, wc.opcode);
2785 DEBUG_LOG(cb, "destroying fr mr1!\n");
2788 DEBUG_LOG(cb, "destroying fr page list!\n");
2789 ib_free_fast_reg_page_list(pl);
2790 DEBUG_LOG(cb, "%s done!\n", __func__);
2794 * fastreg pipelined in a loop as fast as we can until the user interrupts.
2795 * NOTE: every 9 seconds we sleep for 1 second to keep the kernel happy.
/*
 * krping_fr_test3 - pipeline FAST_REG_MR / LOCAL_INV work requests as fast
 * as possible (up to half the tx depth outstanding), with a randomized size
 * per iteration, until the connection errors or the user signals.
 *
 * NOTE(review): this extract is a sampled subset of the original file;
 * error-check branches and closing braces between the visible lines are
 * elided here.
 *
 * FIX: the MR-allocation failure message printed PTR_ERR(pl) — the already
 * valid page list — instead of PTR_ERR(mr), the pointer that actually
 * failed (copy-paste from the page-list error path above).
 */
2797 static void krping_fr_test3(struct krping_cb *cb)
2799 struct ib_fast_reg_page_list *pl;
2800 struct ib_send_wr fr, inv, *bad;
2806 int size = cb->size;
/* Round the transfer size up to whole pages to size the page list. */
2807 int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
2808 unsigned long start;
2813 pl = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
2815 PRINTF(cb, "ib_alloc_fast_reg_page_list failed %ld\n", PTR_ERR(pl));
2819 mr = ib_alloc_fast_reg_mr(cb->pd, plen);
2821 PRINTF(cb, "ib_alloc_fast_reg_mr failed %ld\n", PTR_ERR(mr));
/* Fill the page list with synthetic, page-aligned addresses. */
2825 for (i=0; i<plen; i++)
2826 pl->page_list[i] = i * PAGE_SIZE;
/* Template fast-reg WR; rkey/length/page_list_len updated per iteration. */
2828 memset(&fr, 0, sizeof fr);
2829 fr.opcode = IB_WR_FAST_REG_MR;
2830 fr.wr.fast_reg.page_shift = PAGE_SHIFT;
2831 fr.wr.fast_reg.length = size;
2832 fr.wr.fast_reg.page_list = pl;
2833 fr.wr.fast_reg.page_list_len = plen;
2834 fr.wr.fast_reg.iova_start = 0;
2835 fr.send_flags = IB_SEND_SIGNALED;
2836 fr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
/* Matching local-invalidate WR for each registration. */
2838 memset(&inv, 0, sizeof inv);
2839 inv.opcode = IB_WR_LOCAL_INV;
2840 inv.send_flags = IB_SEND_SIGNALED;
2842 DEBUG_LOG(cb, "fr_test: stag index 0x%x plen %u size %u depth %u\n", mr->rkey >> 8, plen, cb->size, cb->txdepth);
2843 start = time_uptime;
/* Back off for one second every 9 seconds so the CPU isn't hogged. */
2845 if ((time_uptime - start) >= 9) {
2846 DEBUG_LOG(cb, "fr_test: pausing 1 second! count %u latest size %u plen %u\n", count, size, plen);
2847 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
2848 if (cb->state == ERROR)
2850 start = time_uptime;
/* Keep at most txdepth/2 WRs outstanding; randomize size each time. */
2852 while (scnt < (cb->txdepth>>1)) {
2853 ib_update_fast_reg_key(mr, ++key);
2854 fr.wr.fast_reg.rkey = mr->rkey;
2855 inv.ex.invalidate_rkey = mr->rkey;
2856 size = arc4random() % cb->size;
2859 plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
2860 fr.wr.fast_reg.length = size;
2861 fr.wr.fast_reg.page_list_len = plen;
2862 ret = ib_post_send(cb->qp, &fr, &bad);
2864 PRINTF(cb, "ib_post_send failed %d\n", ret);
/* Reap completions; any error status or pending signal ends the loop. */
2871 ret = ib_poll_cq(cb->cq, 1, &wc);
2873 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
2878 PRINTF(cb, "completion error %u\n", wc.status);
2884 else if (krping_sigpending()) {
2885 PRINTF(cb, "signal!\n");
/* Drain whatever is left in the CQ before tearing down. */
2891 DEBUG_LOG(cb, "sleeping 1 second\n");
2892 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
2893 DEBUG_LOG(cb, "draining the cq...\n");
2895 ret = ib_poll_cq(cb->cq, 1, &wc);
2897 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
2902 PRINTF(cb, "completion error %u opcode %u\n", wc.status, wc.opcode);
2906 DEBUG_LOG(cb, "fr_test: done!\n");
2909 DEBUG_LOG(cb, "destroying fr page list!\n");
2910 ib_free_fast_reg_page_list(pl);
2911 DEBUG_LOG(cb, "%s done!\n", __func__);
2915 * fastreg 1 and invalidate 1 mr and verify completion.
/*
 * krping_fr_test4 - post one FAST_REG_MR (and a LOCAL_INV for the same
 * rkey), verify a completion arrives, drain the CQ, then free the MR and
 * page list.
 *
 * NOTE(review): this extract is a sampled subset of the original file;
 * error-check branches and closing braces between the visible lines are
 * elided here.
 *
 * FIX: the MR-allocation failure message printed PTR_ERR(pl) — the already
 * valid page list — instead of PTR_ERR(mr1), the pointer that actually
 * failed (copy-paste from the page-list error path above).
 */
2917 static void krping_fr_test4(struct krping_cb *cb)
2919 struct ib_fast_reg_page_list *pl;
2920 struct ib_send_wr fr, inv, *bad;
2925 int size = cb->size;
/* Round the transfer size up to whole pages to size the page list. */
2926 int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
2929 pl = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
2931 PRINTF(cb, "ib_alloc_fast_reg_page_list failed %ld\n", PTR_ERR(pl));
2935 mr1 = ib_alloc_fast_reg_mr(cb->pd, plen);
2937 PRINTF(cb, "ib_alloc_fast_reg_mr failed %ld\n", PTR_ERR(mr1));
/* Fill the page list with synthetic, page-aligned addresses. */
2941 for (i=0; i<plen; i++)
2942 pl->page_list[i] = i * PAGE_SIZE;
/* Signaled fast-reg WR for mr1. */
2944 memset(&fr, 0, sizeof fr);
2945 fr.opcode = IB_WR_FAST_REG_MR;
2947 fr.wr.fast_reg.page_shift = PAGE_SHIFT;
2948 fr.wr.fast_reg.length = size;
2949 fr.wr.fast_reg.page_list = pl;
2950 fr.wr.fast_reg.page_list_len = plen;
2951 fr.wr.fast_reg.iova_start = 0;
2952 fr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
2953 fr.send_flags = IB_SEND_SIGNALED;
2954 fr.wr.fast_reg.rkey = mr1->rkey;
/* Local-invalidate WR targeting the same rkey. */
2956 memset(&inv, 0, sizeof inv);
2957 inv.opcode = IB_WR_LOCAL_INV;
2958 inv.ex.invalidate_rkey = mr1->rkey;
2960 DEBUG_LOG(cb, "%s fr1: stag 0x%x plen %u size %u depth %u\n", __func__, fr.wr.fast_reg.rkey, plen, cb->size, cb->txdepth);
2961 ret = ib_post_send(cb->qp, &fr, &bad);
2963 PRINTF(cb, "ib_post_send failed %d\n", ret);
/* Poll until the single expected completion is reaped or a signal. */
2966 DEBUG_LOG(cb, "sleeping 1 second\n");
2967 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
2969 ret = ib_poll_cq(cb->cq, 1, &wc);
2971 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
2975 DEBUG_LOG(cb, "completion status %u wr %s\n",
2976 wc.status, wc.wr_id == 1 ? "fr" : "inv");
2978 } else if (krping_sigpending()) {
2979 PRINTF(cb, "signal!\n");
2982 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
2983 } while (count != 1);
/* Drain any stragglers from the CQ before tearing down. */
2985 DEBUG_LOG(cb, "sleeping 1 second\n");
2986 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
2987 DEBUG_LOG(cb, "draining the cq...\n");
2989 ret = ib_poll_cq(cb->cq, 1, &wc);
2991 PRINTF(cb, "ib_poll_cq failed %d\n", ret);
2995 PRINTF(cb, "completion %u opcode %u\n", wc.status, wc.opcode);
2998 DEBUG_LOG(cb, "destroying fr mr1!\n");
3001 DEBUG_LOG(cb, "destroying fr page list!\n");
3002 ib_free_fast_reg_page_list(pl);
3003 DEBUG_LOG(cb, "%s done!\n", __func__);
/*
 * krping_fr_test - dispatch to the fast-register sub-test selected by
 * cb->testnum (1..6); unknown numbers are reported to the log.
 *
 * NOTE(review): this extract is a sampled subset of the original file;
 * the case labels and braces between the visible lines are elided here.
 *
 * FIX: corrected the typo "Unkown" -> "Unknown" in the user-facing
 * diagnostic message.
 */
3006 static void krping_fr_test(struct krping_cb *cb)
3008 switch (cb->testnum) {
3010 krping_fr_test1(cb);
3013 krping_fr_test2(cb);
3016 krping_fr_test3(cb);
3019 krping_fr_test4(cb);
3022 krping_fr_test5_client(cb);
3025 krping_fr_test6_client(cb);
3028 PRINTF(cb, "Unknown frtest num %u\n", cb->testnum);
/*
 * krping_connect_client - initiate the client-side RDMA connection.
 * Calls rdma_connect() with responder_resources/initiator_depth of 1 and
 * up to 10 retries, then blocks on cb->sem until the CM event handler
 * advances cb->state to at least CONNECTED; an ERROR state is reported.
 *
 * NOTE(review): this extract is a sampled subset of the original file;
 * return statements and braces between the visible lines are elided here.
 */
3033 static int krping_connect_client(struct krping_cb *cb)
3035 struct rdma_conn_param conn_param;
3038 memset(&conn_param, 0, sizeof conn_param);
3039 conn_param.responder_resources = 1;
3040 conn_param.initiator_depth = 1;
3041 conn_param.retry_count = 10;
3043 ret = rdma_connect(cb->cm_id, &conn_param);
3045 PRINTF(cb, "rdma_connect error %d\n", ret);
/* The CM event handler wakes cb->sem once the state moves past CONNECTED. */
3049 wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
3050 if (cb->state == ERROR) {
3051 PRINTF(cb, "wait for CONNECTED state %d\n", cb->state);
3055 DEBUG_LOG(cb, "rdma_connect successful\n");
/*
 * krping_bind_client - resolve the server address and route for the client.
 * Builds a sockaddr_in from cb->addr/cb->port (cb->port is already in
 * network byte order — see the htons() in krping_doit), starts
 * rdma_resolve_addr(), and waits until the CM handler reports
 * ROUTE_RESOLVED.  Also verifies fastreg support when mem mode is FASTREG.
 *
 * NOTE(review): this extract is a sampled subset of the original file;
 * return statements and braces between the visible lines are elided here.
 */
3059 static int krping_bind_client(struct krping_cb *cb)
3061 struct sockaddr_in sin;
3064 memset(&sin, 0, sizeof(sin));
3065 sin.sin_len = sizeof sin;
3066 sin.sin_family = AF_INET;
3067 sin.sin_addr.s_addr = cb->addr.s_addr;
3068 sin.sin_port = cb->port;
3070 ret = rdma_resolve_addr(cb->cm_id, NULL, (struct sockaddr *) &sin,
3073 PRINTF(cb, "rdma_resolve_addr error %d\n", ret);
/* Address and route resolution complete asynchronously via the CM handler. */
3077 wait_event_interruptible(cb->sem, cb->state >= ROUTE_RESOLVED);
3078 if (cb->state != ROUTE_RESOLVED) {
3080 "addr/route resolution did not resolve: state %d\n",
/* FASTREG mem mode requires device support; bail out early if absent. */
3085 if (cb->mem == FASTREG && !fastreg_supported(cb, 0))
3088 DEBUG_LOG(cb, "rdma_resolve_addr - rdma_resolve_route successful\n");
/*
 * krping_run_client - top-level client flow: resolve address/route, create
 * the QP and buffers, pre-post the initial receive, connect, run whichever
 * test the options selected (wlat / rlat / bw / frtest / default ping),
 * then disconnect and free the buffers.
 *
 * NOTE(review): this extract is a sampled subset of the original file;
 * error-unwind gotos and braces between the visible lines are elided here.
 */
3092 static void krping_run_client(struct krping_cb *cb)
3094 struct ib_recv_wr *bad_wr;
3097 ret = krping_bind_client(cb);
3101 ret = krping_setup_qp(cb, cb->cm_id);
3103 PRINTF(cb, "setup_qp failed: %d\n", ret);
3107 ret = krping_setup_buffers(cb);
3109 PRINTF(cb, "krping_setup_buffers failed: %d\n", ret);
/* Post the first receive before connecting so no message is missed. */
3113 ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
3115 PRINTF(cb, "ib_post_recv failed: %d\n", ret);
3119 ret = krping_connect_client(cb);
3121 PRINTF(cb, "connect error %d\n", ret);
/* Dispatch on the mutually-exclusive test flags set in krping_doit(). */
3126 krping_wlat_test_client(cb);
3128 krping_rlat_test_client(cb);
3130 krping_bw_test_client(cb);
3131 else if (cb->frtest)
3134 krping_test_client(cb);
3135 rdma_disconnect(cb->cm_id);
3137 krping_free_buffers(cb);
/*
 * krping_doit - entry point for one krping run.  Allocates and links a new
 * control block, parses the option string in 'cmd' via krping_getopt(),
 * validates the option combination, creates the RDMA CM id, then runs the
 * server or client side.  On return the cm_id is destroyed and the cb is
 * unlinked from the global list.
 *
 * NOTE(review): this extract is a sampled subset of the original file;
 * case labels, error gotos, braces, and the final kfree/return between
 * the visible lines are elided here.
 */
3142 int krping_doit(char *cmd, void *cookie)
3144 struct krping_cb *cb;
3148 unsigned long optint;
3150 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
/* Register the new cb on the global list so krping_walk_cb_list sees it. */
3154 mutex_lock(&krping_mutex);
3155 list_add_tail(&cb->list, &krping_cbs);
3156 mutex_unlock(&krping_mutex);
3158 cb->cookie = cookie;
3162 cb->txdepth = RPING_SQ_DEPTH;
3164 init_waitqueue_head(&cb->sem);
/* Option parsing loop: each iteration handles one parsed option. */
3166 while ((op = krping_getopt("krping", &cmd, krping_opts, NULL, &optarg,
3170 cb->addr_str = optarg;
3171 DEBUG_LOG(cb, "ipaddr (%s)\n", optarg);
3172 if (!inet_aton(optarg, &cb->addr)) {
3173 PRINTF(cb, "bad addr string %s\n",
/* Port is stored in network byte order here; used as-is when binding. */
3179 cb->port = htons(optint);
3180 DEBUG_LOG(cb, "port %d\n", (int)optint);
3184 DEBUG_LOG(cb, "server\n");
3188 DEBUG_LOG(cb, "server\n");
3192 DEBUG_LOG(cb, "client\n");
3196 if ((cb->size < 1) ||
3197 (cb->size > RPING_BUFSIZE)) {
3198 PRINTF(cb, "Invalid size %d "
3199 "(valid range is 1 to %d)\n",
3200 cb->size, RPING_BUFSIZE);
3203 DEBUG_LOG(cb, "size %d\n", (int)optint);
3207 if (cb->count < 0) {
3208 PRINTF(cb, "Invalid count %d\n",
3212 DEBUG_LOG(cb, "count %d\n", (int) cb->count);
3216 DEBUG_LOG(cb, "verbose\n");
3220 DEBUG_LOG(cb, "validate data\n");
/* Memory-registration mode: dma, fastreg, mw, or mr. */
3235 if (!strncmp(optarg, "dma", 3))
3237 else if (!strncmp(optarg, "fastreg", 7))
3239 else if (!strncmp(optarg, "mw", 2))
3241 else if (!strncmp(optarg, "mr", 2))
3244 PRINTF(cb, "unknown mem mode %s. "
3245 "Must be dma, fastreg, mw, or mr\n",
3252 cb->server_invalidate = 1;
3255 cb->txdepth = optint;
3256 DEBUG_LOG(cb, "txdepth %d\n", (int) cb->txdepth);
3259 cb->local_dma_lkey = 1;
3260 DEBUG_LOG(cb, "using local dma lkey\n");
3264 DEBUG_LOG(cb, "using read-with-inv\n");
3268 cb->testnum = optint;
3269 DEBUG_LOG(cb, "fast-reg test!\n");
3272 PRINTF(cb, "unknown opt %s\n", optarg);
/* Post-parse sanity checks on the option combination. */
3280 if (cb->server == -1) {
3281 PRINTF(cb, "must be either client or server\n");
3286 if ((cb->frtest + cb->bw + cb->rlat + cb->wlat) > 1) {
3287 PRINTF(cb, "Pick only one test: fr, bw, rlat, wlat\n");
3291 if (cb->server_invalidate && cb->mem != FASTREG) {
3292 PRINTF(cb, "server_invalidate only valid with fastreg mem_mode\n");
3297 if (cb->read_inv && cb->mem != FASTREG) {
3298 PRINTF(cb, "read_inv only valid with fastreg mem_mode\n");
3303 if (cb->mem != MR && (cb->wlat || cb->rlat || cb->bw || cb->frtest)) {
3304 PRINTF(cb, "wlat, rlat, and bw tests only support mem_mode MR\n");
3309 cb->cm_id = rdma_create_id(krping_cma_event_handler, cb, RDMA_PS_TCP, IB_QPT_RC);
3310 if (IS_ERR(cb->cm_id)) {
3311 ret = PTR_ERR(cb->cm_id);
3312 PRINTF(cb, "rdma_create_id error %d\n", ret);
3315 DEBUG_LOG(cb, "created cm_id %p\n", cb->cm_id);
3318 krping_run_server(cb);
3320 krping_run_client(cb);
3322 DEBUG_LOG(cb, "destroy cm_id %p\n", cb->cm_id);
3323 rdma_destroy_id(cb->cm_id);
/* Unlink the cb from the global list before the (elided) cleanup/return. */
3325 mutex_lock(&krping_mutex);
3326 list_del(&cb->list);
3327 mutex_unlock(&krping_mutex);
/*
 * krping_walk_cb_list - invoke callback 'f' once per registered krping
 * session, holding krping_mutex across the walk.  Sessions whose PD has
 * not been created yet get a NULL stats pointer.
 */
krping_walk_cb_list(void (*f)(struct krping_stats *, void *), void *arg)
3335 struct krping_cb *cb;
3337 mutex_lock(&krping_mutex);
3338 list_for_each_entry(cb, &krping_cbs, list)
3339 (*f)(cb->pd ? &cb->stats : NULL, arg);
3340 mutex_unlock(&krping_mutex);
3343 void krping_init(void)
3346 mutex_init(&krping_mutex);