/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define LINUXKPI_PARAM_PREFIX iw_cxgbe_

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <asm/pgtable.h>
#include <linux/page.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgbe.h"
#include "user.h"
static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default = 1)");
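
/*
 * None of the verbs below apply to an iWARP RNIC: there are no address
 * handles, no multicast groups, and no management datagrams on iWARP, and
 * port attributes are not modifiable.  Each stub simply fails with -ENOSYS
 * so the IB core and its consumers get a consistent answer.
 */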
static int c4iw_modify_port(struct ib_device *ibdev,
    u8 port, int port_modify_mask,
    struct ib_port_modify *props)
{
	return -ENOSYS;
}
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}
static int c4iw_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}
static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}
static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}
static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
    u8 port_num, const struct ib_wc *in_wc,
    const struct ib_grh *in_grh,
    const struct ib_mad_hdr *in_mad,
    size_t in_mad_size,
    struct ib_mad_hdr *out_mad,
    size_t *out_mad_size,
    u16 *out_mad_pkey_index)
{
	return -ENOSYS;
}
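
/*
 * User contexts are reference counted: QPs and CQs created under a context
 * can outlive the ib_ucontext itself, so teardown is driven by a kref.
 * _c4iw_free_ucontext() is the final-release callback; c4iw_put_ucontext()
 * is expected to be a thin kref_put() wrapper in iw_cxgbe.h, roughly:
 *
 *	static inline void c4iw_put_ucontext(struct c4iw_ucontext *uc)
 *	{
 *		kref_put(&uc->kref, _c4iw_free_ucontext);
 *	}
 */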
void _c4iw_free_ucontext(struct kref *kref)
{
	struct c4iw_ucontext *ucontext;
	struct c4iw_dev *rhp;
	struct c4iw_mm_entry *mm, *tmp;

	ucontext = container_of(kref, struct c4iw_ucontext, kref);
	rhp = to_c4iw_dev(ucontext->ibucontext.device);

	CTR2(KTR_IW_CXGBE, "%s ucontext %p", __func__, ucontext);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
}
static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);

	CTR2(KTR_IW_CXGBE, "%s context %p", __func__, context);
	c4iw_put_ucontext(ucontext);

	return 0;
}
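
/*
 * A new user context advertises the location of the device status page via
 * an opaque mmap "key": a per-context pseudo file offset that c4iw_mmap()
 * later translates back into a physical address through the context's mmap
 * list.  A consumer such as libcxgb4 would use the response roughly as
 * follows (a sketch, not the actual library code):
 *
 *	struct c4iw_alloc_ucontext_resp resp;	// copied out by the kernel
 *	void *status_page = mmap(NULL, resp.status_page_size, PROT_READ,
 *	    MAP_SHARED, cmd_fd, resp.status_page_key);
 */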
static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
    struct ib_udata *udata)
{
	struct c4iw_ucontext *context;
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
	static int warned;
	struct c4iw_alloc_ucontext_resp uresp;
	int ret = 0;
	struct c4iw_mm_entry *mm = NULL;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context) {
		ret = -ENOMEM;
		goto err;
	}

	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	kref_init(&context->kref);

	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
		if (!warned++)
			log(LOG_ERR, "%s Warning - downlevel libcxgb4 "
			    "(non-fatal), device status page disabled.\n",
			    __func__);
		rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
	} else {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err_free;
		}
		uresp.status_page_size = PAGE_SIZE;

		spin_lock(&context->mmap_lock);
		uresp.status_page_key = context->key;
		context->key += PAGE_SIZE;
		spin_unlock(&context->mmap_lock);

		ret = ib_copy_to_udata(udata, &uresp,
		    sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err_mm;

		mm->key = uresp.status_page_key;
		mm->addr = vtophys(rhp->rdev.status_page);
		mm->len = PAGE_SIZE;
		insert_mmap(context, mm);
	}
	return &context->ibucontext;
err_mm:
	kfree(mm);
err_free:
	kfree(context);
err:
	return ERR_PTR(ret);
}
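
/*
 * Translate a user mmap of the verbs fd back into device memory.  The file
 * offset is not a real offset but one of the keys handed out above (status
 * page) or at CQ/QP creation (queue memory, doorbells): remove_mmap() looks
 * the key up in the ucontext and yields the physical address to map.
 */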
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct c4iw_rdev *rdev;
	int ret = 0;
	struct c4iw_mm_entry *mm;
	struct c4iw_ucontext *ucontext;
	u64 addr = 0;

	CTR4(KTR_IW_CXGBE, "%s:1 ctx %p vma %p, vm_start %u", __func__,
	    context, vma, vma->vm_start);
	CTR4(KTR_IW_CXGBE, "%s:1a pgoff 0x%lx key 0x%x len %d", __func__,
	    vma->vm_pgoff, key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		CTR3(KTR_IW_CXGBE, "%s:2 unaligned vm_start %u vma %p",
		    __func__, vma->vm_start, vma);
		return -EINVAL;
	}

	rdev = &(to_c4iw_dev(context->device)->rdev);
	ucontext = to_c4iw_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm) {
		CTR4(KTR_IW_CXGBE, "%s:3 ucontext %p key %u len %u", __func__,
		    ucontext, key, len);
		return -EINVAL;
	}
	addr = mm->addr;
	kfree(mm);

	/*
	 * User DB-GTS registers if addr is in the BAR2 range, else WQ or CQ
	 * memory.  BAR2 mappings are write-combined when the adapter
	 * enables it.
	 */
	if (rdev->adap->iwt.wc_en && addr >= rdev->bar2_pa &&
	    addr < rdev->bar2_pa + rdev->bar2_len)
		vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);

	ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
	    len, vma->vm_page_prot);
	CTR4(KTR_IW_CXGBE, "%s:4 ctx %p vma %p ret %u", __func__, context, vma,
	    ret);

	return ret;
}
static int
c4iw_deallocate_pd(struct ib_pd *pd)
{
	struct c4iw_pd *php = to_c4iw_pd(pd);
	struct c4iw_dev *rhp = php->rhp;

	CTR3(KTR_IW_CXGBE, "%s: pd %p, pdid 0x%x", __func__, pd, php->pdid);

	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur--;
	mutex_unlock(&rhp->rdev.stats.lock);
	kfree(php);

	return (0);
}
static struct ib_pd *
c4iw_allocate_pd(struct ib_device *ibdev, struct ib_ucontext *context,
    struct ib_udata *udata)
{
	struct c4iw_pd *php;
	u32 pdid;
	struct c4iw_dev *rhp;

	CTR4(KTR_IW_CXGBE, "%s: ibdev %p, context %p, data %p", __func__, ibdev,
	    context, udata);
	rhp = (struct c4iw_dev *) ibdev;
	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
			c4iw_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur++;
	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
	mutex_unlock(&rhp->rdev.stats.lock);

	CTR5(KTR_IW_CXGBE,
	    "%s: ibdev %p, context %p, data %p, pdid 0x%x, pd %p", __func__,
	    ibdev, context, udata, pdid, php);

	return (&php->ibpd);
}
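
/*
 * iWARP has no P_Key concept; report a single table entry whose value is
 * zero so that consumers querying index 0 are satisfied.
 */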
static int
c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{

	CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, pkey %p", __func__,
	    ibdev, port, index, pkey);
	*pkey = 0;
	return (0);
}
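
/*
 * There is no GID table either; the single GID for each port is synthesized
 * from the port's Ethernet MAC address, zero-padded to 16 bytes.
 */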
static int
c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid)
{
	struct c4iw_dev *dev;
	struct port_info *pi;
	struct adapter *sc;

	CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, gid %p", __func__,
	    ibdev, port, index, gid);
	memset(&gid->raw[0], 0, sizeof(gid->raw));
	dev = to_c4iw_dev(ibdev);
	sc = dev->rdev.adap;
	if (port == 0 || port > sc->params.nports)
		return (-EINVAL);
	pi = sc->port[port - 1];
	memcpy(&gid->raw[0], pi->vi[0].hw_addr, ETHER_ADDR_LEN);
	return (0);
}
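
/*
 * Device attributes are derived from what the firmware dedicated to RDMA.
 * vres.qp.size counts individual queue IDs, and each QP consumes an SQ/RQ
 * pair, which is why max_qp is half of it.
 */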
static int
c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
    struct ib_udata *uhw)
{
	struct c4iw_dev *dev = to_c4iw_dev(ibdev);
	struct adapter *sc = dev->rdev.adap;

	CTR3(KTR_IW_CXGBE, "%s ibdev %p, props %p", __func__, ibdev, props);

	if (uhw->inlen || uhw->outlen)
		return (-EINVAL);

	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, sc->port[0]->vi[0].hw_addr,
	    ETHER_ADDR_LEN);
	props->hw_ver = sc->params.chipid;
	props->fw_ver = sc->params.fw_vers;
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = T4_PAGESIZE_MASK;
	props->vendor_id = pci_get_vendor(sc->dev);
	props->vendor_part_id = pci_get_device(sc->dev);
	props->max_mr_size = T4_MAX_MR_SIZE;
	props->max_qp = sc->vres.qp.size / 2;
	props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
	props->max_sge = T4_MAX_RECV_SGE;
	props->max_sge_rd = 1;
	props->max_res_rd_atom = sc->params.max_ird_adapter;
	props->max_qp_rd_atom = min(sc->params.max_ordird_qp,
	    c4iw_max_read_depth);
	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
	props->max_cq = sc->vres.qp.size;
	props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
	props->max_mr = c4iw_num_stags(&dev->rdev);
	props->max_pd = T4_MAX_NUM_PD;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len =
	    t4_max_fr_depth(sc->params.ulptx_memwrite_dsgl && use_dsgl);

	return (0);
}
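
/*
 * Port attributes report the largest IB-defined MTU that fits within the
 * netif's current MTU, and take the link state straight from the port's
 * link_cfg.
 */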
/*
 * Returns -errno on failure.
 */
static int
c4iw_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
{
	struct c4iw_dev *dev;
	struct adapter *sc;
	struct port_info *pi;
	struct ifnet *ifp;

	CTR4(KTR_IW_CXGBE, "%s ibdev %p, port %d, props %p", __func__, ibdev,
	    port, props);
	dev = to_c4iw_dev(ibdev);
	sc = dev->rdev.adap;
	if (port > sc->params.nports)
		return (-EINVAL);
	pi = sc->port[port - 1];
	ifp = pi->vi[0].ifp;

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (ifp->if_mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (ifp->if_mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (ifp->if_mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (ifp->if_mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;
	props->state = pi->link_cfg.link_ok ? IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}
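
/*
 * Immutable port data is cached by the IB core at registration time; for an
 * iWARP device only the table sizes and the RDMA_CORE_PORT_IWARP capability
 * matter.
 */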
static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}
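
/*
 * Registration fills in the ib_device method table and the iw_cm_verbs
 * callbacks, then hands the device to the IB core.  Fastreg support
 * (IB_DEVICE_MEM_MGT_EXTENSIONS) can be turned off with the fastreg_support
 * module parameter declared above; with the linuxkpi prefix this should
 * surface as the iw_cxgbe_fastreg_support tunable.
 */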
/*
 * Returns -errno on error.
 */
int
c4iw_register_device(struct c4iw_dev *dev)
{
	struct adapter *sc = dev->rdev.adap;
	struct ib_device *ibdev = &dev->ibdev;
	struct iw_cm_verbs *iwcm;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev, sc);
	BUG_ON(!sc->port[0]);
	strlcpy(ibdev->name, device_get_nameunit(sc->dev), sizeof(ibdev->name));
	memset(&ibdev->node_guid, 0, sizeof(ibdev->node_guid));
	memcpy(&ibdev->node_guid, sc->port[0]->vi[0].hw_addr, ETHER_ADDR_LEN);
	ibdev->owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
	if (fastreg_support)
		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	ibdev->local_dma_lkey = 0;
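	/*
	 * Advertise only the uverbs commands this driver actually
	 * implements; anything outside this mask is rejected by the core
	 * before it reaches the driver.
	 */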
	ibdev->uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	ibdev->node_type = RDMA_NODE_RNIC;
	strlcpy(ibdev->node_desc, C4IW_NODE_DESC, sizeof(ibdev->node_desc));
	ibdev->phys_port_cnt = sc->params.nports;
	ibdev->num_comp_vectors = 1;
	ibdev->dma_device = NULL;
	ibdev->query_device = c4iw_query_device;
	ibdev->query_port = c4iw_query_port;
	ibdev->modify_port = c4iw_modify_port;
	ibdev->query_pkey = c4iw_query_pkey;
	ibdev->query_gid = c4iw_query_gid;
	ibdev->alloc_ucontext = c4iw_alloc_ucontext;
	ibdev->dealloc_ucontext = c4iw_dealloc_ucontext;
	ibdev->mmap = c4iw_mmap;
	ibdev->alloc_pd = c4iw_allocate_pd;
	ibdev->dealloc_pd = c4iw_deallocate_pd;
	ibdev->create_ah = c4iw_ah_create;
	ibdev->destroy_ah = c4iw_ah_destroy;
	ibdev->create_qp = c4iw_create_qp;
	ibdev->modify_qp = c4iw_ib_modify_qp;
	ibdev->query_qp = c4iw_ib_query_qp;
	ibdev->destroy_qp = c4iw_destroy_qp;
	ibdev->create_cq = c4iw_create_cq;
	ibdev->destroy_cq = c4iw_destroy_cq;
	ibdev->resize_cq = c4iw_resize_cq;
	ibdev->poll_cq = c4iw_poll_cq;
	ibdev->get_dma_mr = c4iw_get_dma_mr;
	ibdev->reg_user_mr = c4iw_reg_user_mr;
	ibdev->dereg_mr = c4iw_dereg_mr;
	ibdev->alloc_mw = c4iw_alloc_mw;
	ibdev->dealloc_mw = c4iw_dealloc_mw;
	ibdev->alloc_mr = c4iw_alloc_mr;
	ibdev->map_mr_sg = c4iw_map_mr_sg;
	ibdev->attach_mcast = c4iw_multicast_attach;
	ibdev->detach_mcast = c4iw_multicast_detach;
	ibdev->process_mad = c4iw_process_mad;
	ibdev->req_notify_cq = c4iw_arm_cq;
	ibdev->post_send = c4iw_post_send;
	ibdev->post_recv = c4iw_post_receive;
	ibdev->uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
	ibdev->get_port_immutable = c4iw_port_immutable;

	iwcm = kmalloc(sizeof(*iwcm), GFP_KERNEL);
	if (iwcm == NULL)
		return (-ENOMEM);

	iwcm->connect = c4iw_connect;
	iwcm->accept = c4iw_accept_cr;
	iwcm->reject = c4iw_reject_cr;
	iwcm->create_listen = c4iw_create_listen;
	iwcm->destroy_listen = c4iw_destroy_listen;
	iwcm->add_ref = c4iw_qp_add_ref;
	iwcm->rem_ref = c4iw_qp_rem_ref;
	iwcm->get_qp = c4iw_get_qp;
	ibdev->iwcm = iwcm;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		kfree(iwcm);

	return (ret);
}
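
/*
 * Undo c4iw_register_device(): pull the device out of the IB core and free
 * the iw_cm_verbs table allocated at registration.
 */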
void
c4iw_unregister_device(struct c4iw_dev *dev)
{

	CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev,
	    dev->rdev.adap);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);

	return;
}
#endif