/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define LINUXKPI_PARAM_PREFIX iw_cxgbe_

#include <asm/pgtable.h>
#include <linux/page.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

/* Driver-local definitions (c4iw_* types and the user ABI structures). */
#include "iw_cxgbe.h"
#include "user.h"

static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default = 1)");

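/*
 * The verbs below do not apply to an iWARP RNIC (no UD address handles,
 * multicast groups, or MAD processing), so they fail with -ENOSYS.
 */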
static int c4iw_modify_port(struct ib_device *ibdev,
                            u8 port, int port_modify_mask,
                            struct ib_port_modify *props)
{
        return -ENOSYS;
}

static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
                                    struct ib_ah_attr *ah_attr,
                                    struct ib_udata *udata)
{
        return ERR_PTR(-ENOSYS);
}

static int c4iw_ah_destroy(struct ib_ah *ah)
{
        return -ENOSYS;
}

static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
                            u8 port_num, const struct ib_wc *in_wc,
                            const struct ib_grh *in_grh,
                            const struct ib_mad_hdr *in_mad,
                            size_t in_mad_size,
                            struct ib_mad_hdr *out_mad,
                            size_t *out_mad_size,
                            u16 *out_mad_pkey_index)
{
        return -ENOSYS;
}

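/*
 * Final kref release for a user context: free any mmap entries still queued
 * on the context and release the per-context device resources.
 */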
void _c4iw_free_ucontext(struct kref *kref)
{
        struct c4iw_ucontext *ucontext;
        struct c4iw_dev *rhp;
        struct c4iw_mm_entry *mm, *tmp;

        ucontext = container_of(kref, struct c4iw_ucontext, kref);
        rhp = to_c4iw_dev(ucontext->ibucontext.device);

        CTR2(KTR_IW_CXGBE, "%s ucontext %p", __func__, ucontext);
        list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                kfree(mm);
        c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
        kfree(ucontext);
}

static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
        struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);

        CTR2(KTR_IW_CXGBE, "%s context %p", __func__, context);
        c4iw_put_ucontext(ucontext);
        return 0;
}

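/*
 * Allocate a user context and, when the user library is new enough, export
 * the adapter status page to it through a mmap key (see c4iw_mmap below).
 */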
static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
                                               struct ib_udata *udata)
{
        struct c4iw_ucontext *context;
        struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
        static int warned;
        struct c4iw_alloc_ucontext_resp uresp;
        int ret = 0;
        struct c4iw_mm_entry *mm = NULL;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context) {
                ret = -ENOMEM;
                goto err;
        }

        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);
        kref_init(&context->kref);

        if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
                if (!warned++)
                        log(LOG_ERR, "%s Warning - downlevel libcxgb4 "
                            "(non-fatal), device status page disabled.\n",
                            __func__);
                rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
        } else {
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err_free;
                }
                uresp.status_page_size = PAGE_SIZE;

                spin_lock(&context->mmap_lock);
                uresp.status_page_key = context->key;
                context->key += PAGE_SIZE;
                spin_unlock(&context->mmap_lock);

                ret = ib_copy_to_udata(udata, &uresp,
                    sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err_mm;

                mm->key = uresp.status_page_key;
                mm->addr = vtophys(rhp->rdev.status_page);
                mm->len = PAGE_SIZE;
                insert_mmap(context, mm);
        }
        return &context->ibucontext;
err_mm:
        kfree(mm);
err_free:
        kfree(context);
err:
        return ERR_PTR(ret);
}

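/*
 * mmap handler: the offset is a key previously handed to userspace; look it
 * up in the per-context mmap list and map the backing physical address.
 * BAR2 (doorbell/GTS) ranges may be mapped write-combining when enabled.
 */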
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        int len = vma->vm_end - vma->vm_start;
        u32 key = vma->vm_pgoff << PAGE_SHIFT;
        struct c4iw_rdev *rdev;
        int ret = 0;
        struct c4iw_mm_entry *mm;
        struct c4iw_ucontext *ucontext;
        u64 addr = 0;

        CTR4(KTR_IW_CXGBE, "%s:1 ctx %p vma %p, vm_start %u", __func__,
            context, vma, vma->vm_start);
        CTR4(KTR_IW_CXGBE, "%s:1a pgoff 0x%lx key 0x%x len %d", __func__,
            vma->vm_pgoff, key, len);

        if (vma->vm_start & (PAGE_SIZE-1)) {
                CTR3(KTR_IW_CXGBE, "%s:2 unaligned vm_start %u vma %p",
                    __func__, vma->vm_start, vma);
                return -EINVAL;
        }

        rdev = &(to_c4iw_dev(context->device)->rdev);
        ucontext = to_c4iw_ucontext(context);

        mm = remove_mmap(ucontext, key, len);
        if (!mm) {
                CTR4(KTR_IW_CXGBE, "%s:3 ucontext %p key %u len %u", __func__,
                    ucontext, key, len);
                return -EINVAL;
        }
        addr = mm->addr;
        kfree(mm);

        /*
         * User DB-GTS registers if addr is in the udbs_res range,
         * else WQ or CQ memory.
         */
        if (rdev->adap->iwt.wc_en && addr >= rdev->bar2_pa &&
            addr < rdev->bar2_pa + rdev->bar2_len)
                vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);

        ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
            len, vma->vm_page_prot);
        CTR4(KTR_IW_CXGBE, "%s:4 ctx %p vma %p ret %u", __func__, context, vma,
            ret);
        return ret;
}

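/*
 * Protection domains: PD ids come from the rdev pdid resource table and are
 * tracked in the rdev stats so current/max usage can be reported.
 */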
static int
c4iw_deallocate_pd(struct ib_pd *pd)
{
        struct c4iw_pd *php = to_c4iw_pd(pd);
        struct c4iw_dev *rhp = php->rhp;

        CTR3(KTR_IW_CXGBE, "%s: pd %p, pdid 0x%x", __func__, pd, php->pdid);

        c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur--;
        mutex_unlock(&rhp->rdev.stats.lock);
        kfree(php);
        return (0);
}

static struct ib_pd *
c4iw_allocate_pd(struct ib_device *ibdev, struct ib_ucontext *context,
    struct ib_udata *udata)
{
        struct c4iw_pd *php;
        u32 pdid;
        struct c4iw_dev *rhp;

        CTR4(KTR_IW_CXGBE, "%s: ibdev %p, context %p, data %p", __func__, ibdev,
            context, udata);
        rhp = (struct c4iw_dev *) ibdev;
        pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
        if (!pdid)
                return ERR_PTR(-EINVAL);
        php = kzalloc(sizeof(*php), GFP_KERNEL);
        if (!php) {
                c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
                return ERR_PTR(-ENOMEM);
        }
        php->pdid = pdid;
        php->rhp = rhp;
        if (context) {
                if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
                        c4iw_deallocate_pd(&php->ibpd);
                        return ERR_PTR(-EFAULT);
                }
        }
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur++;
        if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
                rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
        mutex_unlock(&rhp->rdev.stats.lock);

        CTR6(KTR_IW_CXGBE,
            "%s: ibdev %p, context %p, data %p, pddid 0x%x, pd %p", __func__,
            ibdev, context, udata, pdid, php);
        return (&php->ibpd);
}

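/*
 * iWARP has no P_Keys or a real GID table: report a single pkey of 0 and
 * synthesize the port GID from the port's MAC address.
 */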
static int
c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{

        CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, pkey %p", __func__,
            ibdev, port, index, pkey);
        *pkey = 0;
        return (0);
}

static int
c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid)
{
        struct c4iw_dev *dev;
        struct port_info *pi;
        struct adapter *sc;

        CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, gid %p", __func__,
            ibdev, port, index, gid);

        memset(&gid->raw[0], 0, sizeof(gid->raw));
        dev = to_c4iw_dev(ibdev);
        sc = dev->rdev.adap;
        if (port == 0 || port > sc->params.nports)
                return (-EINVAL);
        pi = sc->port[port - 1];
        memcpy(&gid->raw[0], pi->vi[0].hw_addr, ETHER_ADDR_LEN);
        return (0);
}

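/*
 * Device attributes are derived from the parent cxgbe adapter: PCI ids,
 * chip/firmware revisions, and the sizes of the QP/CQ/STag resource ranges
 * reserved for RDMA.
 */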
static int
c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
    struct ib_udata *uhw)
{
        struct c4iw_dev *dev = to_c4iw_dev(ibdev);
        struct adapter *sc = dev->rdev.adap;

        CTR3(KTR_IW_CXGBE, "%s ibdev %p, props %p", __func__, ibdev, props);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        memset(props, 0, sizeof *props);
        memcpy(&props->sys_image_guid, sc->port[0]->vi[0].hw_addr,
            ETHER_ADDR_LEN);
        props->hw_ver = sc->params.chipid;
        props->fw_ver = sc->params.fw_vers;
        props->device_cap_flags = dev->device_cap_flags;
        props->page_size_cap = T4_PAGESIZE_MASK;
        props->vendor_id = pci_get_vendor(sc->dev);
        props->vendor_part_id = pci_get_device(sc->dev);
        props->max_mr_size = T4_MAX_MR_SIZE;
        props->max_qp = sc->vres.qp.size / 2;
        props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
        props->max_sge = T4_MAX_RECV_SGE;
        props->max_sge_rd = 1;
        props->max_res_rd_atom = sc->params.max_ird_adapter;
        props->max_qp_rd_atom = min(sc->params.max_ordird_qp,
            c4iw_max_read_depth);
        props->max_qp_init_rd_atom = props->max_qp_rd_atom;
        props->max_cq = sc->vres.qp.size;
        props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
        props->max_mr = c4iw_num_stags(&dev->rdev);
        props->max_pd = T4_MAX_NUM_PD;
        props->local_ca_ack_delay = 0;
        props->max_fast_reg_page_list_len =
            t4_max_fr_depth(sc->params.ulptx_memwrite_dsgl && use_dsgl);

        return (0);
}

/*
 * Returns -errno on failure.
 */
static int
c4iw_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
{
        struct c4iw_dev *dev;
        struct adapter *sc;
        struct port_info *pi;
        struct ifnet *ifp;

        CTR4(KTR_IW_CXGBE, "%s ibdev %p, port %d, props %p", __func__, ibdev,
            port, props);

        dev = to_c4iw_dev(ibdev);
        sc = dev->rdev.adap;
        if (port > sc->params.nports)
                return (-EINVAL);
        pi = sc->port[port - 1];
        ifp = pi->vi[0].ifp;

        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
        if (ifp->if_mtu >= 4096)
                props->active_mtu = IB_MTU_4096;
        else if (ifp->if_mtu >= 2048)
                props->active_mtu = IB_MTU_2048;
        else if (ifp->if_mtu >= 1024)
                props->active_mtu = IB_MTU_1024;
        else if (ifp->if_mtu >= 512)
                props->active_mtu = IB_MTU_512;
        else
                props->active_mtu = IB_MTU_256;
        props->state = pi->link_cfg.link_ok ? IB_PORT_ACTIVE : IB_PORT_DOWN;
        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_SNMP_TUNNEL_SUP |
            IB_PORT_REINIT_SUP |
            IB_PORT_DEVICE_MGMT_SUP |
            IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->active_width = 2;
        props->active_speed = 2;
        props->max_msg_sz = -1;

        return 0;
}

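/*
 * Immutable port data for the core: iWARP transport capabilities plus the
 * table lengths reported by ib_query_port() above.
 */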
static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;

        return 0;
}

/*
 * Returns -errno on error.
 */
int
c4iw_register_device(struct c4iw_dev *dev)
{
        struct adapter *sc = dev->rdev.adap;
        struct ib_device *ibdev = &dev->ibdev;
        struct iw_cm_verbs *iwcm;
        int ret;

        CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev, sc);
        BUG_ON(!sc->port[0]);
        strlcpy(ibdev->name, device_get_nameunit(sc->dev), sizeof(ibdev->name));
        memset(&ibdev->node_guid, 0, sizeof(ibdev->node_guid));
        memcpy(&ibdev->node_guid, sc->port[0]->vi[0].hw_addr, ETHER_ADDR_LEN);
        ibdev->owner = THIS_MODULE;
        dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
        if (fastreg_support)
                dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        ibdev->local_dma_lkey = 0;
        ibdev->uverbs_cmd_mask =
            (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
            (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
            (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
            (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_REG_MR) |
            (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
            (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
            (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
            (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
            (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
            (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
            (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
            (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
            (1ull << IB_USER_VERBS_CMD_POST_SEND) |
            (1ull << IB_USER_VERBS_CMD_POST_RECV);
        ibdev->node_type = RDMA_NODE_RNIC;
        strlcpy(ibdev->node_desc, C4IW_NODE_DESC, sizeof(ibdev->node_desc));
        ibdev->phys_port_cnt = sc->params.nports;
        ibdev->num_comp_vectors = 1;
        ibdev->dma_device = NULL;
        ibdev->query_device = c4iw_query_device;
        ibdev->query_port = c4iw_query_port;
        ibdev->modify_port = c4iw_modify_port;
        ibdev->query_pkey = c4iw_query_pkey;
        ibdev->query_gid = c4iw_query_gid;
        ibdev->alloc_ucontext = c4iw_alloc_ucontext;
        ibdev->dealloc_ucontext = c4iw_dealloc_ucontext;
        ibdev->mmap = c4iw_mmap;
        ibdev->alloc_pd = c4iw_allocate_pd;
        ibdev->dealloc_pd = c4iw_deallocate_pd;
        ibdev->create_ah = c4iw_ah_create;
        ibdev->destroy_ah = c4iw_ah_destroy;
        ibdev->create_qp = c4iw_create_qp;
        ibdev->modify_qp = c4iw_ib_modify_qp;
        ibdev->query_qp = c4iw_ib_query_qp;
        ibdev->destroy_qp = c4iw_destroy_qp;
        ibdev->create_cq = c4iw_create_cq;
        ibdev->destroy_cq = c4iw_destroy_cq;
        ibdev->resize_cq = c4iw_resize_cq;
        ibdev->poll_cq = c4iw_poll_cq;
        ibdev->get_dma_mr = c4iw_get_dma_mr;
        ibdev->reg_user_mr = c4iw_reg_user_mr;
        ibdev->dereg_mr = c4iw_dereg_mr;
        ibdev->alloc_mw = c4iw_alloc_mw;
        ibdev->dealloc_mw = c4iw_dealloc_mw;
        ibdev->alloc_mr = c4iw_alloc_mr;
        ibdev->map_mr_sg = c4iw_map_mr_sg;
        ibdev->attach_mcast = c4iw_multicast_attach;
        ibdev->detach_mcast = c4iw_multicast_detach;
        ibdev->process_mad = c4iw_process_mad;
        ibdev->req_notify_cq = c4iw_arm_cq;
        ibdev->post_send = c4iw_post_send;
        ibdev->post_recv = c4iw_post_receive;
        ibdev->uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
        ibdev->get_port_immutable = c4iw_port_immutable;

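        /*
         * iWARP connection-manager entry points; ownership of iwcm passes
         * to the ib_device and it is freed in c4iw_unregister_device().
         */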
        iwcm = kmalloc(sizeof(*iwcm), GFP_KERNEL);
        if (iwcm == NULL)
                return (-ENOMEM);

        iwcm->connect = c4iw_connect;
        iwcm->accept = c4iw_accept_cr;
        iwcm->reject = c4iw_reject_cr;
        iwcm->create_listen = c4iw_create_listen;
        iwcm->destroy_listen = c4iw_destroy_listen;
        iwcm->add_ref = c4iw_qp_add_ref;
        iwcm->rem_ref = c4iw_qp_rem_ref;
        iwcm->get_qp = c4iw_get_qp;
        ibdev->iwcm = iwcm;

        ret = ib_register_device(&dev->ibdev, NULL);
        if (ret)
                kfree(iwcm);
        return (ret);
}

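/*
 * Undo c4iw_register_device(): detach from the ib core and free the
 * connection-manager callbacks allocated at registration time.
 */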
void
c4iw_unregister_device(struct c4iw_dev *dev)
{

        CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev,
            dev->rdev.adap);
        ib_unregister_device(&dev->ibdev);
        kfree(dev->ibdev.iwcm);
        return;
}