2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
4 * Copyright (c) 2018 - 2022 Intel Corporation
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenFabrics.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include "irdma_main.h"
/**
 * irdma_get_dev_fw_str - format the firmware version as "major.minor"
 * @dev: ib device handle from the stack
 * NOTE(review): the str/str_len output parameters and braces are not
 * visible in this excerpt — confirm against the full source.
 */
39 irdma_get_dev_fw_str(struct ib_device *dev,
43 struct irdma_device *iwdev = to_iwdev(dev);
45 snprintf(str, str_len, "%u.%u",
46 irdma_fw_major_ver(&iwdev->rf->sc_dev),
47 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
/**
 * irdma_add_gid - ib_device callback to add a GID table entry
 * @device: ib device
 * @gid: GID value being added
 * @attr: attributes of the GID entry
 * NOTE(review): body not visible in this excerpt.
 */
51 irdma_add_gid(struct ib_device *device,
54 const union ib_gid *gid,
55 const struct ib_gid_attr *attr,
62 irdma_del_gid(struct ib_device *device,
71 * irdma_alloc_mr - register stag for fast memory registration
73 * @mr_type: memory for stag registration
74 * @max_num_sg: max number of pages
78 irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
79 u32 max_num_sg, struct ib_udata *udata)
81 struct irdma_device *iwdev = to_iwdev(pd->device);
82 struct irdma_pble_alloc *palloc;
83 struct irdma_pbl *iwpbl;
84 struct irdma_mr *iwmr;
87 int err_code = -ENOMEM;
/* NOTE(review): several guard/error lines are missing from this excerpt;
 * returns below are presumably under the elided failure checks.
 */
89 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
91 return ERR_PTR(-ENOMEM);
/* Obtain a device stag; rkey/lkey of the new MR are both set to it. */
93 stag = irdma_create_stag(iwdev);
100 iwmr->ibmr.rkey = stag;
101 iwmr->ibmr.lkey = stag;
103 iwmr->ibmr.device = pd->device;
104 iwpbl = &iwmr->iwpbl;
106 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
107 palloc = &iwpbl->pble_alloc;
108 iwmr->page_cnt = max_num_sg;
/* Reserve PBLEs for up to max_num_sg pages, then allocate the HW stag. */
109 status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
114 err_code = irdma_hw_alloc_stag(iwdev, iwmr);
118 iwpbl->pbl_allocated = true;
/* Unwind path: release PBLEs, then the stag, before reporting the error. */
122 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
124 irdma_free_stag(iwdev, stag);
128 return ERR_PTR(err_code);
132 * irdma_alloc_ucontext - Allocate the user context data structure
136 * This keeps track of all objects associated with a particular
140 irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
142 struct ib_device *ibdev = uctx->device;
143 struct irdma_device *iwdev = to_iwdev(ibdev);
144 struct irdma_alloc_ucontext_req req;
145 struct irdma_alloc_ucontext_resp uresp = {0};
146 struct irdma_ucontext *ucontext = to_ucontext(uctx);
147 struct irdma_uk_attrs *uk_attrs;
149 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
/* Only ABI versions 4 through IRDMA_ABI_VER are accepted. */
152 if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
155 ucontext->iwdev = iwdev;
156 ucontext->abi_ver = req.userspace_ver;
158 uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
159 /* GEN_1 support for libi40iw */
160 if (udata->outlen < sizeof(uresp)) {
/* Short outlen means an old (legacy libi40iw) userspace; only GEN_1 HW
 * supports that mode.
 */
161 if (uk_attrs->hw_rev != IRDMA_GEN_1)
164 ucontext->legacy_mode = true;
165 uresp.max_qps = iwdev->rf->max_qp;
166 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
167 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
168 uresp.kernel_ver = req.userspace_ver;
169 if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)))
/* Modern path: export the doorbell BAR offset via an mmap entry. */
173 (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
174 ucontext->db_mmap_entry =
175 irdma_user_mmap_entry_insert(ucontext, bar_off,
178 if (!ucontext->db_mmap_entry) {
181 uresp.kernel_ver = IRDMA_ABI_VER;
182 uresp.feature_flags = uk_attrs->feature_flags;
183 uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
184 uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
185 uresp.max_hw_inline = uk_attrs->max_hw_inline;
186 uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
187 uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
188 uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
189 uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
190 uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
191 uresp.hw_rev = uk_attrs->hw_rev;
192 if (ib_copy_to_udata(udata, &uresp,
193 min(sizeof(uresp), udata->outlen))) {
/* Copy-out failed: drop the just-inserted doorbell mmap entry. */
194 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
/* Per-context bookkeeping for CQ/QP registered memory and vmas. */
199 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
200 spin_lock_init(&ucontext->cq_reg_mem_list_lock);
201 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
202 spin_lock_init(&ucontext->qp_reg_mem_list_lock);
203 INIT_LIST_HEAD(&ucontext->vma_list);
204 mutex_init(&ucontext->vma_list_mutex);
/* Version-mismatch error path. */
209 irdma_dev_err(&iwdev->rf->sc_dev,
210 "Invalid userspace driver version detected. Detected version %d, should be %d\n",
211 req.userspace_ver, IRDMA_ABI_VER);
216 * irdma_dealloc_ucontext - deallocate the user context data structure
217 * @context: user context created during alloc
220 irdma_dealloc_ucontext(struct ib_ucontext *context)
222 struct irdma_ucontext *ucontext = to_ucontext(context);
/* Tear down the doorbell mmap entry created at alloc_ucontext time. */
224 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
230 * irdma_alloc_pd - allocate protection domain
231 * @pd: protection domain
235 irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
237 struct irdma_pd *iwpd = to_iwpd(pd);
238 struct irdma_device *iwdev = to_iwdev(pd->device);
239 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
240 struct irdma_pci_f *rf = iwdev->rf;
241 struct irdma_alloc_pd_resp uresp = {0};
242 struct irdma_sc_pd *sc_pd;
/* Reserve a PD id from the device-wide resource bitmap. */
246 err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
251 sc_pd = &iwpd->sc_pd;
/* User-mode path: init with the context's negotiated ABI version and
 * copy the PD id back to userspace.
 */
253 struct irdma_ucontext *ucontext =
254 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
257 irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
259 if (ib_copy_to_udata(udata, &uresp,
260 min(sizeof(uresp), udata->outlen))) {
/* Kernel-mode path uses the driver's current ABI version. */
265 irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
/* Error path: give the PD id back. */
272 irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
/**
 * irdma_dealloc_pd - deallocate a protection domain
 * @ibpd: PD being freed
 * @udata: user data (unused here in the visible lines)
 */
278 irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
280 struct irdma_pd *iwpd = to_iwpd(ibpd);
281 struct irdma_device *iwdev = to_iwdev(ibpd->device);
/* Return the PD id to the device-wide resource bitmap. */
283 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
/**
 * irdma_fill_ah_info - fill source/dest addresses into AH info
 * @vnet: vnet of the source netdev (used for IPv4 loopback detection)
 * @ah_info: address handle info to populate
 * @sgid_attr: source GID attributes
 * @sgid_addr: source address (sockaddr form of the source GID)
 * @dgid_addr: destination address (sockaddr form of the dest GID)
 * @dmac: destination MAC; rewritten for multicast destinations
 * @net_type: RDMA network type (IPv4 vs IPv6 path selection)
 */
287 irdma_fill_ah_info(struct vnet *vnet,
288 struct irdma_ah_info *ah_info,
289 const struct ib_gid_attr *sgid_attr,
290 struct sockaddr *sgid_addr, struct sockaddr *dgid_addr,
291 u8 *dmac, u8 net_type)
293 if (net_type == RDMA_NETWORK_IPV4) {
294 ah_info->ipv4_valid = true;
/* Addresses are stored host-order in ah_info. */
295 ah_info->dest_ip_addr[0] =
296 ntohl(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr);
297 ah_info->src_ip_addr[0] =
298 ntohl(((struct sockaddr_in *)sgid_addr)->sin_addr.s_addr);
299 ah_info->do_lpbk = irdma_ipv4_is_lpb(vnet,
300 ah_info->src_ip_addr[0],
301 ah_info->dest_ip_addr[0]);
/* Multicast destinations get a derived multicast MAC. */
302 if (ipv4_is_multicast(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr)) {
303 irdma_mcast_mac_v4(ah_info->dest_ip_addr, dmac);
/* IPv6 path (presumably the else branch; the else line is elided). */
306 irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
307 ((struct sockaddr_in6 *)dgid_addr)->sin6_addr.__u6_addr.__u6_addr32);
308 irdma_copy_ip_ntohl(ah_info->src_ip_addr,
309 ((struct sockaddr_in6 *)sgid_addr)->sin6_addr.__u6_addr.__u6_addr32);
310 ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
311 ah_info->dest_ip_addr);
312 if (rdma_is_multicast_addr(&((struct sockaddr_in6 *)dgid_addr)->sin6_addr)) {
313 irdma_mcast_mac_v6(ah_info->dest_ip_addr, dmac);
/**
 * irdma_create_ah_vlan_tag - resolve ARP entry and VLAN tag for an AH
 * @iwdev: irdma device
 * @ah_info: AH info whose vlan_tag/dst_arpindex are filled in
 * @sgid_attr: source GID attributes (source of the VLAN id, if any)
 * NOTE(review): dmac parameter line is elided in this excerpt.
 */
319 irdma_create_ah_vlan_tag(struct irdma_device *iwdev,
320 struct irdma_ah_info *ah_info,
321 const struct ib_gid_attr *sgid_attr,
/* VLAN_N_VID is used as the "no VLAN" sentinel. */
324 if (sgid_attr->ndev && is_vlan_dev(sgid_attr->ndev))
325 ah_info->vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev);
327 ah_info->vlan_tag = VLAN_N_VID;
329 ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr, dmac);
331 if (ah_info->dst_arpindex == -1)
/* In DCB VLAN mode an untagged AH is forced onto VLAN 0. */
334 if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
335 ah_info->vlan_tag = 0;
337 if (ah_info->vlan_tag < VLAN_N_VID) {
338 ah_info->insert_vlan_tag = true;
/* Map TOS to 802.1p priority bits in the tag. */
340 rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
/**
 * irdma_create_ah_wait - poll the CCQ until the AH becomes valid
 * @rf: PCI function info
 * @sc_ah: AH being created
 * @sleep: whether the CQP op was issued in sleepable context
 * Busy-polls the completion queue for at most
 * CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD iterations.
 */
346 irdma_create_ah_wait(struct irdma_pci_f *rf,
347 struct irdma_sc_ah *sc_ah, bool sleep)
350 int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
353 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
355 } while (!sc_ah->ah_info.ah_valid && --cnt);
364 * irdma_create_ah - create address handle
366 * @attr: address handle attributes
367 * @flags: AH flags to wait
370 * returns 0 on success, error otherwise
373 irdma_create_ah(struct ib_ah *ib_ah,
374 struct ib_ah_attr *attr, u32 flags,
375 struct ib_udata *udata)
377 struct irdma_pd *pd = to_iwpd(ib_ah->pd);
378 struct irdma_ah *ah = container_of(ib_ah, struct irdma_ah, ibah);
379 struct irdma_device *iwdev = to_iwdev(ib_ah->pd->device);
381 struct ib_gid_attr sgid_attr;
382 struct irdma_pci_f *rf = iwdev->rf;
383 struct irdma_sc_ah *sc_ah;
385 struct irdma_ah_info *ah_info;
386 struct irdma_create_ah_resp uresp;
/* Anonymous union holding either address family's sockaddr form. */
388 struct sockaddr saddr;
389 struct sockaddr_in saddr_in;
390 struct sockaddr_in6 saddr_in6;
391 } sgid_addr, dgid_addr;
/* Reserve an AH index from the device-wide resource bitmap. */
396 err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
397 rf->max_ah, &ah_id, &rf->next_ah);
404 sc_ah->ah_info.ah_idx = ah_id;
405 sc_ah->ah_info.vsi = &iwdev->vsi;
406 irdma_sc_init_ah(&rf->sc_dev, sc_ah);
407 ah->sgid_index = attr->grh.sgid_index;
408 memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid));
/* Look up the source GID; ib_get_cached_gid takes a ref on the ndev. */
410 err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num,
411 attr->grh.sgid_index, &sgid, &sgid_attr);
414 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
415 "GID lookup at idx=%d with port=%d failed\n",
416 attr->grh.sgid_index, attr->port_num);
/* Convert both GIDs to sockaddr form for address handling below. */
420 rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
421 rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
422 ah->av.attrs = *attr;
423 ah->av.net_type = kc_rdma_gid_attr_network_type(sgid_attr,
/* Drop the ndev reference taken by the GID lookup. */
428 dev_put(sgid_attr.ndev);
430 ah->av.sgid_addr.saddr = sgid_addr.saddr;
431 ah->av.dgid_addr.saddr = dgid_addr.saddr;
432 ah_info = &sc_ah->ah_info;
433 ah_info->ah_idx = ah_id;
434 ah_info->pd_idx = pd->sc_pd.pd_id;
435 ether_addr_copy(ah_info->mac_addr, IF_LLADDR(iwdev->netdev));
437 if (attr->ah_flags & IB_AH_GRH) {
438 ah_info->flow_label = attr->grh.flow_label;
439 ah_info->hop_ttl = attr->grh.hop_limit;
440 ah_info->tc_tos = attr->grh.traffic_class;
443 ether_addr_copy(dmac, attr->dmac);
445 irdma_fill_ah_info(iwdev->netdev->if_vnet,
446 ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
447 dmac, ah->av.net_type);
449 err = irdma_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr, dmac);
/* Issue the CQP AH-create op, then wait for the AH to become valid. */
453 sleep = flags & RDMA_CREATE_AH_SLEEPABLE;
455 err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
456 sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
458 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
459 "CQP-OP Create AH fail");
463 err = irdma_create_ah_wait(rf, sc_ah, sleep);
465 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
466 "CQP create AH timed out");
/* User-mode caller: return the AH id via udata. */
471 uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
472 err = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
/* Error path: return the AH index to the resource bitmap. */
476 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
/**
 * irdma_ether_copy - copy the destination MAC out of AH attributes
 * @dmac: destination buffer for the MAC address
 * @attr: AH attributes holding the source MAC
 */
481 irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr)
483 ether_addr_copy(dmac, attr->dmac);
/**
 * irdma_create_ah_stub - placeholder create-AH callback
 * NOTE(review): body not visible; presumably returns an "unsupported"
 * error for configurations without AH support.
 */
487 irdma_create_ah_stub(struct ib_ah *ib_ah,
488 struct ib_ah_attr *attr, u32 flags,
489 struct ib_udata *udata)
495 irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags)
501 * irdma_free_qp_rsrc - free up memory resources for qp
502 * @iwqp: qp ptr (user or kernel)
505 irdma_free_qp_rsrc(struct irdma_qp *iwqp)
507 struct irdma_device *iwdev = iwqp->iwdev;
508 struct irdma_pci_f *rf = iwdev->rf;
509 u32 qp_num = iwqp->ibqp.qp_num;
/* Release IEQ state and any push page held by the QP. */
511 irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
512 irdma_dealloc_push_page(rf, &iwqp->sc_qp);
513 if (iwqp->sc_qp.vsi) {
514 irdma_qp_rem_qos(&iwqp->sc_qp);
515 iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
516 iwqp->sc_qp.user_pri);
/* Return the QP number, then free DMA and kernel-mode bookkeeping. */
520 irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
521 irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
522 irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);
523 kfree(iwqp->kqp.sig_trk_mem);
524 iwqp->kqp.sig_trk_mem = NULL;
525 kfree(iwqp->kqp.sq_wrid_mem);
526 kfree(iwqp->kqp.rq_wrid_mem);
527 kfree(iwqp->sg_list);
532 * irdma_create_qp - create qp
534 * @init_attr: attributes for qp
535 * @udata: user data for create qp
538 irdma_create_qp(struct ib_pd *ibpd,
539 struct ib_qp_init_attr *init_attr,
540 struct ib_udata *udata)
542 struct irdma_pd *iwpd = to_iwpd(ibpd);
543 struct irdma_device *iwdev = to_iwdev(ibpd->device);
544 struct irdma_pci_f *rf = iwdev->rf;
545 struct irdma_qp *iwqp;
546 struct irdma_create_qp_req req;
547 struct irdma_create_qp_resp uresp = {0};
553 struct irdma_sc_qp *qp;
554 struct irdma_sc_dev *dev = &rf->sc_dev;
555 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
556 struct irdma_qp_init_info init_info = {{0}};
557 struct irdma_qp_host_ctx_info *ctx_info;
/* Validate requested attributes against device limits first. */
560 err_code = irdma_validate_qp_attrs(init_attr, iwdev);
562 return ERR_PTR(err_code);
564 sq_size = init_attr->cap.max_send_wr;
565 rq_size = init_attr->cap.max_recv_wr;
567 init_info.vsi = &iwdev->vsi;
568 init_info.qp_uk_init_info.uk_attrs = uk_attrs;
569 init_info.qp_uk_init_info.sq_size = sq_size;
570 init_info.qp_uk_init_info.rq_size = rq_size;
571 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
572 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
573 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
575 iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
577 return ERR_PTR(-ENOMEM);
579 iwqp->sg_list = kcalloc(uk_attrs->max_hw_wq_frags, sizeof(*iwqp->sg_list),
581 if (!iwqp->sg_list) {
583 return ERR_PTR(-ENOMEM);
587 qp->qp_uk.back_qp = iwqp;
588 qp->qp_uk.lock = &iwqp->lock;
589 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
/* Q2 buffer and QP host context live in one DMA allocation. */
592 iwqp->q2_ctx_mem.size = IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE;
593 iwqp->q2_ctx_mem.va = irdma_allocate_dma_mem(dev->hw, &iwqp->q2_ctx_mem,
594 iwqp->q2_ctx_mem.size,
596 if (!iwqp->q2_ctx_mem.va) {
597 kfree(iwqp->sg_list);
599 return ERR_PTR(-ENOMEM);
602 init_info.q2 = iwqp->q2_ctx_mem.va;
603 init_info.q2_pa = iwqp->q2_ctx_mem.pa;
604 init_info.host_ctx = (__le64 *) (init_info.q2 + IRDMA_Q2_BUF_SIZE);
605 init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
/* GSI QPs get a reserved number; others allocate from the bitmap. */
607 if (init_attr->qp_type == IB_QPT_GSI)
610 err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
611 &qp_num, &rf->next_qp);
616 iwqp->ibqp.qp_num = qp_num;
618 iwqp->iwscq = to_iwcq(init_attr->send_cq);
619 iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
620 iwqp->host_ctx.va = init_info.host_ctx;
621 iwqp->host_ctx.pa = init_info.host_ctx_pa;
622 iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
624 init_info.pd = &iwpd->sc_pd;
625 init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
/* iWARP requires first-SQ-WQ semantics (not needed for RoCE). */
626 if (!rdma_protocol_roce(&iwdev->ibdev, 1))
627 init_info.qp_uk_init_info.first_sq_wq = 1;
628 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
629 init_waitqueue_head(&iwqp->waitq);
630 init_waitqueue_head(&iwqp->mod_qp_waitq);
/* User-mode QP: pull WQE buffer info from udata and map user memory. */
633 err_code = ib_copy_from_udata(&req, udata,
634 min(sizeof(req), udata->inlen));
636 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
637 "ib_copy_from_data fail\n");
641 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
643 if (req.user_wqe_bufs) {
644 struct irdma_ucontext *ucontext = to_ucontext(ibpd->uobject->context);
646 init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
647 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
648 iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
649 &ucontext->qp_reg_mem_list);
650 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
654 irdma_debug(iwdev_to_idev(iwdev),
660 init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
661 irdma_setup_virt_qp(iwdev, iwqp, &init_info);
/* Kernel-mode QP path (presumably the else branch; else line elided). */
663 INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
664 init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
665 err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
669 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
670 "setup qp failed\n");
/* Select QP type and capability flags per protocol/QP type. */
674 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
675 if (init_attr->qp_type == IB_QPT_RC) {
676 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
677 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
678 IRDMA_WRITE_WITH_IMM |
681 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
682 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
686 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
687 init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
690 ret = irdma_sc_qp_init(qp, &init_info);
693 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
698 ctx_info = &iwqp->ctx_info;
699 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
700 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
702 if (rdma_protocol_roce(&iwdev->ibdev, 1))
703 irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
705 irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
/* Create the QP in hardware via CQP. */
707 err_code = irdma_cqp_create_qp_cmd(iwqp);
711 atomic_set(&iwqp->refcnt, 1);
712 spin_lock_init(&iwqp->lock);
713 spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
714 iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
715 rf->qp_table[qp_num] = iwqp;
716 iwqp->max_send_wr = sq_size;
717 iwqp->max_recv_wr = rq_size;
/* RoCE QPs join the work scheduler / QoS tree. */
719 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
720 if (dev->ws_add(&iwdev->vsi, 0)) {
721 irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
726 irdma_qp_add_qos(&iwqp->sc_qp);
730 /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
731 if (udata->outlen < sizeof(uresp)) {
733 uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
735 if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
738 uresp.actual_sq_size = sq_size;
739 uresp.actual_rq_size = rq_size;
740 uresp.qp_id = qp_num;
741 uresp.qp_caps = qp->qp_uk.qp_caps;
743 err_code = ib_copy_to_udata(udata, &uresp,
744 min(sizeof(uresp), udata->outlen));
746 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
747 "copy_to_udata failed\n");
/* Copy-out failed after HW create: tear the QP down fully. */
748 kc_irdma_destroy_qp(&iwqp->ibqp, udata);
749 return ERR_PTR(err_code);
753 init_completion(&iwqp->free_qp);
/* Common error path: release everything acquired so far. */
757 irdma_free_qp_rsrc(iwqp);
759 return ERR_PTR(err_code);
763 * irdma_destroy_qp - destroy qp
764 * @ibqp: qp's ib pointer also to get to device's qp address
768 irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
770 struct irdma_qp *iwqp = to_iwqp(ibqp);
771 struct irdma_device *iwdev = iwqp->iwdev;
/* Guard against double-destroy. */
773 if (iwqp->sc_qp.qp_uk.destroy_pending)
775 iwqp->sc_qp.qp_uk.destroy_pending = true;
776 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
777 irdma_modify_qp_to_err(&iwqp->sc_qp);
779 if (!iwqp->user_mode)
780 cancel_delayed_work_sync(&iwqp->dwork_flush);
/* Drop our reference and wait until all other holders are gone. */
782 irdma_qp_rem_ref(&iwqp->ibqp);
783 wait_for_completion(&iwqp->free_qp);
784 irdma_free_lsmm_rsrc(iwqp);
/* Skip the HW destroy if the function is being reset. */
785 if (!iwdev->rf->reset &&
786 irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp))
787 return -ENOTRECOVERABLE;
789 if (!iwqp->user_mode) {
791 irdma_clean_cqes(iwqp, iwqp->iwscq);
792 if (iwqp->iwrcq != iwqp->iwscq)
793 irdma_clean_cqes(iwqp, iwqp->iwrcq);
796 irdma_remove_push_mmap_entries(iwqp);
797 irdma_free_qp_rsrc(iwqp);
803 * irdma_create_cq - create cq
804 * @ibcq: CQ allocated
805 * @attr: attributes for cq
809 irdma_create_cq(struct ib_cq *ibcq,
810 const struct ib_cq_init_attr *attr,
811 struct ib_udata *udata)
813 struct ib_device *ibdev = ibcq->device;
814 struct irdma_device *iwdev = to_iwdev(ibdev);
815 struct irdma_pci_f *rf = iwdev->rf;
816 struct irdma_cq *iwcq = to_iwcq(ibcq);
818 struct irdma_sc_cq *cq;
819 struct irdma_sc_dev *dev = &rf->sc_dev;
820 struct irdma_cq_init_info info = {0};
822 struct irdma_cqp_request *cqp_request;
823 struct cqp_cmds_info *cqp_info;
824 struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
827 int entries = attr->cqe;
829 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
/* Reserve a CQ number from the device-wide resource bitmap. */
832 err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
838 atomic_set(&iwcq->refcnt, 1);
839 spin_lock_init(&iwcq->lock);
840 INIT_LIST_HEAD(&iwcq->resize_list);
841 INIT_LIST_HEAD(&iwcq->cmpl_generated);
/* Hardware CQ size never drops below 4 entries. */
843 ukinfo->cq_size = max(entries, 4);
844 ukinfo->cq_id = cq_num;
845 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
846 if (attr->comp_vector < rf->ceqs_count)
847 info.ceq_id = attr->comp_vector;
848 info.ceq_id_valid = true;
850 info.type = IRDMA_CQ_TYPE_IWARP;
851 info.vsi = &iwdev->vsi;
/* User-mode CQ: locate the user's CQ buffer (and shadow area) PBLs. */
854 struct irdma_ucontext *ucontext;
855 struct irdma_create_cq_req req = {0};
856 struct irdma_cq_mr *cqmr;
857 struct irdma_pbl *iwpbl;
858 struct irdma_pbl *iwpbl_shadow;
859 struct irdma_cq_mr *cqmr_shadow;
861 iwcq->user_mode = true;
862 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
863 if (ib_copy_from_udata(&req, udata,
864 min(sizeof(req), udata->inlen))) {
869 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
870 iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
871 &ucontext->cq_reg_mem_list);
872 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
878 iwcq->cq_mem_size = 0;
879 cqmr = &iwpbl->cq_mr;
/* A separate shadow area is only used on HW with CQ resize and a
 * non-legacy userspace.
 */
881 if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
882 IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
883 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
884 iwpbl_shadow = irdma_get_pbl((unsigned long)req.user_shadow_area,
885 &ucontext->cq_reg_mem_list);
886 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
892 iwcq->iwpbl_shadow = iwpbl_shadow;
893 cqmr_shadow = &iwpbl_shadow->cq_mr;
894 info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
897 info.shadow_area_pa = cqmr->shadow;
899 if (iwpbl->pbl_allocated) {
900 info.virtual_map = true;
901 info.pbl_chunk_size = 1;
902 info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
904 info.cq_base_pa = cqmr->cq_pbl.addr;
907 /* Kmode allocations */
910 if (entries < 1 || entries > rf->max_cqe) {
916 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
918 ukinfo->cq_size = entries;
/* Kernel-mode CQ ring: DMA memory rounded to a 256-byte multiple. */
920 rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
921 iwcq->kmem.size = round_up(rsize, 256);
922 iwcq->kmem.va = irdma_allocate_dma_mem(dev->hw, &iwcq->kmem,
923 iwcq->kmem.size, 256);
924 if (!iwcq->kmem.va) {
929 iwcq->kmem_shadow.size = IRDMA_SHADOW_AREA_SIZE << 3;
930 iwcq->kmem_shadow.va = irdma_allocate_dma_mem(dev->hw,
932 iwcq->kmem_shadow.size,
935 if (!iwcq->kmem_shadow.va) {
939 info.shadow_area_pa = iwcq->kmem_shadow.pa;
940 ukinfo->shadow_area = iwcq->kmem_shadow.va;
941 ukinfo->cq_base = iwcq->kmem.va;
942 info.cq_base_pa = iwcq->kmem.pa;
945 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
946 info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
947 (u32)IRDMA_MAX_CQ_READ_THRESH);
948 if (irdma_sc_cq_init(cq, &info)) {
949 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
/* Issue the CQP CQ-create command against hardware. */
955 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
960 cqp_info = &cqp_request->info;
961 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
962 cqp_info->post_sq = 1;
963 cqp_info->in.u.cq_create.cq = cq;
964 cqp_info->in.u.cq_create.check_overflow = true;
965 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
966 status = irdma_handle_cqp_op(rf, cqp_request);
967 irdma_put_cqp_request(&rf->cqp, cqp_request);
/* User-mode caller gets the CQ id/size copied back. */
974 struct irdma_create_cq_resp resp = {0};
976 resp.cq_id = info.cq_uk_init_info.cq_id;
977 resp.cq_size = info.cq_uk_init_info.cq_size;
978 if (ib_copy_to_udata(udata, &resp,
979 min(sizeof(resp), udata->outlen))) {
980 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
981 "copy to user data\n");
987 rf->cq_table[cq_num] = iwcq;
988 init_completion(&iwcq->free_cq);
/* Error paths: undo HW CQ create, then release driver resources. */
992 irdma_cq_wq_destroy(rf, cq);
994 irdma_cq_free_rsrc(rf, iwcq);
999 * irdma_copy_user_pgaddrs - copy user page address to pble's os locally
1000 * @iwmr: iwmr for IB's user page addresses
1001 * @pbl: ple pointer to save 1 level or 0 level pble
1002 * @level: indicated level 0, 1 or 2
1006 irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
1007 enum irdma_pble_level level)
1009 struct ib_umem *region = iwmr->region;
1010 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1011 int chunk_pages, entry, i;
1012 struct scatterlist *sg;
1014 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1015 struct irdma_pble_info *pinfo;
/* Level-1 PBLs are written linearly; level-2 walks the leaf list. */
1019 pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
1020 for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
1021 chunk_pages = DIV_ROUND_UP(sg_dma_len(sg), iwmr->page_size);
/* Remember the SQ page for QP-type registrations (first hit only). */
1022 if (iwmr->type == IRDMA_MEMREG_TYPE_QP && !iwpbl->qp_mr.sq_page)
1023 iwpbl->qp_mr.sq_page = sg_page(sg);
1024 for (i = 0; i < chunk_pages; i++) {
1025 pg_addr = sg_dma_address(sg) + (i * iwmr->page_size);
1026 if ((entry + i) == 0)
1027 *pbl = pg_addr & iwmr->page_msk;
1028 else if (!(pg_addr & ~iwmr->page_msk))
/* Stop once every PBLE in the allocation has been filled. */
1032 if (++pbl_cnt == palloc->total_cnt)
1034 pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
1040 * irdma_destroy_ah - Destroy address handle
1041 * @ibah: pointer to address handle
1042 * @ah_flags: destroy flags
1046 irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
1048 struct irdma_device *iwdev = to_iwdev(ibah->device);
1049 struct irdma_ah *ah = to_iwah(ibah);
/* Destroy the AH in HW via CQP, then return its index to the bitmap. */
1051 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
1054 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
1055 ah->sc_ah.ah_info.ah_idx);
/**
 * irdma_dereg_mr - deregister a memory region
 * @ib_mr: MR being deregistered
 * @udata: user data (used to find the ucontext for user-mode MRs)
 * Issues a CQP dealloc-stag op, then frees the stag, PBLEs and umem.
 */
1059 irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
1061 struct ib_pd *ibpd = ib_mr->pd;
1062 struct irdma_pd *iwpd = to_iwpd(ibpd);
1063 struct irdma_mr *iwmr = to_iwmr(ib_mr);
1064 struct irdma_device *iwdev = to_iwdev(ib_mr->device);
1065 struct irdma_dealloc_stag_info *info;
1066 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1067 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1068 struct irdma_cqp_request *cqp_request;
1069 struct cqp_cmds_info *cqp_info;
/* QP/CQ-type registrations are only tracked in the ucontext lists. */
1072 if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
1074 struct irdma_ucontext *ucontext;
1076 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1077 irdma_del_memlist(iwmr, ucontext);
1082 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
1086 cqp_info = &cqp_request->info;
1087 info = &cqp_info->in.u.dealloc_stag.info;
1088 memset(info, 0, sizeof(*info));
1089 info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
1090 info->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S);
1092 if (iwpbl->pbl_allocated)
1093 info->dealloc_pbl = true;
1095 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
1096 cqp_info->post_sq = 1;
1097 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
1098 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
1099 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
1100 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
1104 irdma_free_stag(iwdev, iwmr->stag);
1106 if (iwpbl->pbl_allocated)
1107 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
/* Release the pinned user memory backing the MR. */
1110 ib_umem_release(iwmr->region);
/**
 * kc_irdma_set_roce_cm_info - fill RoCE connection info from AH attributes
 * @iwqp: QP whose RoCE AV/context info is being populated
 * @attr: QP attributes holding the AH attribute (port, sgid index)
 * NOTE(review): vlan_id parameter line is elided in this excerpt.
 */
1117 kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
1122 struct ib_gid_attr sgid_attr;
1123 struct irdma_av *av = &iwqp->roce_ah.av;
/* GID lookup also resolves the ndev (for VLAN id and MAC below). */
1125 ret = ib_get_cached_gid(iwqp->ibqp.device, attr->ah_attr.port_num,
1126 attr->ah_attr.grh.sgid_index, &sgid,
1131 if (sgid_attr.ndev) {
1132 *vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
1133 ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, IF_LLADDR(sgid_attr.ndev));
1136 rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid);
/* Drop the ndev reference taken by the GID lookup. */
1138 dev_put(sgid_attr.ndev);
1144 * irdma_destroy_cq - destroy cq
1145 * @ib_cq: cq pointer
1149 irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1151 struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1152 struct irdma_cq *iwcq = to_iwcq(ib_cq);
1153 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1154 struct irdma_sc_dev *dev = cq->dev;
1155 struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1156 struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1157 unsigned long flags;
/* Flush generated completions and pending resizes under the CQ lock. */
1159 spin_lock_irqsave(&iwcq->lock, flags);
1160 if (!list_empty(&iwcq->cmpl_generated))
1161 irdma_remove_cmpls_list(iwcq);
1162 if (!list_empty(&iwcq->resize_list))
1163 irdma_process_resize_list(iwcq, iwdev, NULL);
1164 spin_unlock_irqrestore(&iwcq->lock, flags);
/* Drop our reference and wait until all other holders are gone. */
1166 irdma_cq_rem_ref(ib_cq);
1167 wait_for_completion(&iwcq->free_cq);
1169 irdma_cq_wq_destroy(iwdev->rf, cq);
1170 irdma_cq_free_rsrc(iwdev->rf, iwcq);
/* Scrub stale CEQEs referring to this CQ from its CEQ. */
1172 spin_lock_irqsave(&iwceq->ce_lock, flags);
1173 irdma_sc_cleanup_ceqes(cq, ceq);
1174 spin_unlock_irqrestore(&iwceq->ce_lock, flags);
1178 * irdma_alloc_mw - Allocate memory window
1179 * @pd: Protection domain
1180 * @type: Window type
1181 * @udata: user data pointer
1184 irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1185 struct ib_udata *udata)
1187 struct irdma_device *iwdev = to_iwdev(pd->device);
1188 struct irdma_mr *iwmr;
1192 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1194 return ERR_PTR(-ENOMEM);
/* A stag backs the window; its value becomes the MW rkey. */
1196 stag = irdma_create_stag(iwdev);
1199 return ERR_PTR(-ENOMEM);
1203 iwmr->ibmw.rkey = stag;
1205 iwmr->ibmw.type = type;
1206 iwmr->ibmw.device = pd->device;
1208 err_code = irdma_hw_alloc_mw(iwdev, iwmr);
/* Error path: release the stag if the HW alloc failed. */
1210 irdma_free_stag(iwdev, stag);
1212 return ERR_PTR(err_code);
1219 * kc_set_loc_seq_num_mss - Set local seq number and mss
1220 * @cm_node: cm node info
1223 kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node)
/* Initial sequence number is derived from the nanosecond clock. */
1228 cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
/* With jumbo MTU and a small receive window, clamp MSS to a 1500-byte
 * MTU; otherwise derive it from the actual VSI MTU.
 */
1229 if (cm_node->iwdev->vsi.mtu > 1500 &&
1230 2 * cm_node->iwdev->vsi.mtu > cm_node->iwdev->rcv_wnd)
1231 cm_node->tcp_cntxt.mss = (cm_node->ipv4) ?
1232 (1500 - IRDMA_MTU_TO_MSS_IPV4) :
1233 (1500 - IRDMA_MTU_TO_MSS_IPV6);
1235 cm_node->tcp_cntxt.mss = (cm_node->ipv4) ?
1236 (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4) :
1237 (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6);
1241 * irdma_disassociate_ucontext - Disassociate user context
1242 * @context: ib user context
/* NOTE(review): body not visible; presumably a no-op stub. */
1245 irdma_disassociate_ucontext(struct ib_ucontext *context)
/**
 * ib_device_get_by_netdev - find the ib device bound to a netdev
 * @netdev: interface to match
 * @driver_id: driver id (unused in the visible lines)
 * Walks the global handler list under irdma_handler_lock and returns
 * the matching device's ib_device, or falls through when none matches.
 */
1250 ib_device_get_by_netdev(struct ifnet *netdev, int driver_id)
1252 struct irdma_device *iwdev;
1253 struct irdma_handler *hdl;
1254 unsigned long flags;
1256 spin_lock_irqsave(&irdma_handler_lock, flags);
1257 list_for_each_entry(hdl, &irdma_handlers, list) {
1259 if (netdev == iwdev->netdev) {
1260 spin_unlock_irqrestore(&irdma_handler_lock,
1262 return &iwdev->ibdev;
1265 spin_unlock_irqrestore(&irdma_handler_lock, flags);
/* ib_unregister_device_put - compat shim mapping to ib_unregister_device */
1271 ib_unregister_device_put(struct ib_device *device)
1273 ib_unregister_device(device);
1277 * irdma_query_gid_roce - Query port GID for Roce
1278 * @ibdev: device pointer from stack
1279 * @port: port number
1280 * @index: Entry index
1284 irdma_query_gid_roce(struct ib_device *ibdev, u8 port, int index,
1289 ret = rdma_query_gid(ibdev, port, index, gid);
/* Treat an empty slot (-EAGAIN) as the zero GID rather than an error. */
1290 if (ret == -EAGAIN) {
1291 memcpy(gid, &zgid, sizeof(*gid));
1299 * irdma_modify_port - modify port attributes
1300 * @ibdev: device pointer from stack
1301 * @port: port number for query
1302 * @mask: Property mask
1303 * @props: returning device attributes
/* NOTE(review): body not visible; presumably validates and returns. */
1306 irdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
1307 struct ib_port_modify *props)
1316 * irdma_query_pkey - Query partition key
1317 * @ibdev: device pointer from stack
1318 * @port: port number
1319 * @index: index of pkey
1320 * @pkey: pointer to store the pkey
1323 irdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
/* Only a single fixed-size pkey table with the default pkey exists. */
1326 if (index >= IRDMA_PKEY_TBL_SZ)
1329 *pkey = IRDMA_DEFAULT_PKEY;
/**
 * irdma_roce_port_immutable - fill immutable port attributes (RoCEv2)
 * @ibdev: device pointer from stack
 * @port_num: port number
 * @immutable: output immutable attributes
 */
1334 irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
1335 struct ib_port_immutable *immutable)
1337 struct ib_port_attr attr;
1340 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
1341 err = ib_query_port(ibdev, port_num, &attr);
1345 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
1346 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1347 immutable->gid_tbl_len = attr.gid_tbl_len;
1353 irdma_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
1354 struct ib_port_immutable *immutable)
1356 struct ib_port_attr attr;
1359 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
1360 err = ib_query_port(ibdev, port_num, &attr);
1363 immutable->gid_tbl_len = 1;
1369 * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed
1370 * @link_speed: netdev phy link speed
1371 * @active_speed: IB port speed
1372 * @active_width: IB port width
1375 irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed,
1378 if (link_speed <= SPEED_1000) {
1379 *active_width = IB_WIDTH_1X;
1380 *active_speed = IB_SPEED_SDR;
1381 } else if (link_speed <= SPEED_10000) {
1382 *active_width = IB_WIDTH_1X;
1383 *active_speed = IB_SPEED_FDR10;
1384 } else if (link_speed <= SPEED_20000) {
1385 *active_width = IB_WIDTH_4X;
1386 *active_speed = IB_SPEED_DDR;
1387 } else if (link_speed <= SPEED_25000) {
1388 *active_width = IB_WIDTH_1X;
1389 *active_speed = IB_SPEED_EDR;
1390 } else if (link_speed <= SPEED_40000) {
1391 *active_width = IB_WIDTH_4X;
1392 *active_speed = IB_SPEED_FDR10;
1394 *active_width = IB_WIDTH_4X;
1395 *active_speed = IB_SPEED_EDR;
1400 * irdma_query_port - get port attributes
1401 * @ibdev: device pointer from stack
1402 * @port: port number for query
1403 * @props: returning device attributes
1406 irdma_query_port(struct ib_device *ibdev, u8 port,
1407 struct ib_port_attr *props)
1409 struct irdma_device *iwdev = to_iwdev(ibdev);
1410 struct ifnet *netdev = iwdev->netdev;
1412 /* no need to zero out pros here. done by caller */
1414 props->max_mtu = IB_MTU_4096;
1415 props->active_mtu = ib_mtu_int_to_enum(netdev->if_mtu);
1420 if ((netdev->if_link_state == LINK_STATE_UP) && (netdev->if_drv_flags & IFF_DRV_RUNNING)) {
1421 props->state = IB_PORT_ACTIVE;
1422 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
1424 props->state = IB_PORT_DOWN;
1425 props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
1427 irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
1428 &props->active_width);
1430 if (rdma_protocol_roce(ibdev, 1)) {
1431 props->gid_tbl_len = 32;
1432 kc_set_props_ip_gid_caps(props);
1433 props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
1435 props->gid_tbl_len = 1;
1437 props->qkey_viol_cntr = 0;
1438 props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
1439 props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
1444 extern const char *const irdma_hw_stat_names[];
1447 * irdma_alloc_hw_stats - Allocate a hw stats structure
1448 * @ibdev: device pointer from stack
1449 * @port_num: port number
1451 struct rdma_hw_stats *
1452 irdma_alloc_hw_stats(struct ib_device *ibdev,
1455 struct irdma_device *iwdev = to_iwdev(ibdev);
1456 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1458 int num_counters = dev->hw_attrs.max_stat_idx;
1459 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
1461 return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
1466 * irdma_get_hw_stats - Populates the rdma_hw_stats structure
1467 * @ibdev: device pointer from stack
1468 * @stats: stats pointer from stack
1469 * @port_num: port number
1470 * @index: which hw counter the stack is requesting we update
1473 irdma_get_hw_stats(struct ib_device *ibdev,
1474 struct rdma_hw_stats *stats, u8 port_num,
1477 struct irdma_device *iwdev = to_iwdev(ibdev);
1478 struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
1480 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
1481 irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
1483 memcpy(&stats->value[0], hw_stats, sizeof(u64)* stats->num_counters);
1485 return stats->num_counters;
1489 * irdma_query_gid - Query port GID
1490 * @ibdev: device pointer from stack
1491 * @port: port number
1492 * @index: Entry index
1496 irdma_query_gid(struct ib_device *ibdev, u8 port, int index,
1499 struct irdma_device *iwdev = to_iwdev(ibdev);
1501 memset(gid->raw, 0, sizeof(gid->raw));
1502 ether_addr_copy(gid->raw, IF_LLADDR(iwdev->netdev));
1507 enum rdma_link_layer
1508 irdma_get_link_layer(struct ib_device *ibdev,
1511 return IB_LINK_LAYER_ETHERNET;
1515 ib_mtu_int_to_enum(int mtu)
1519 else if (mtu >= 2048)
1521 else if (mtu >= 1024)
1523 else if (mtu >= 512)
1530 kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev)
1532 iwdev->ibdev.uverbs_cmd_mask |=
1533 BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
1534 BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
1535 BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
1536 BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST);
1540 kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
1542 iwdev->ibdev.uverbs_cmd_mask =
1543 BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
1544 BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
1545 BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
1546 BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
1547 BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
1548 BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
1549 BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
1550 BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1551 BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
1552 BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ) |
1553 BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
1554 BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
1555 BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
1556 BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
1557 BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
1558 BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ) |
1559 BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
1560 BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
1561 BIT_ULL(IB_USER_VERBS_CMD_BIND_MW) |
1562 BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
1563 BIT_ULL(IB_USER_VERBS_CMD_POST_RECV) |
1564 BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
1565 iwdev->ibdev.uverbs_ex_cmd_mask =
1566 BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_QP) |
1567 BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
1569 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
1570 iwdev->ibdev.uverbs_ex_cmd_mask |= BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ);