/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2018 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "irdma_main.h"
irdma_get_dev_fw_str(struct ib_device *dev,
struct irdma_device *iwdev = to_iwdev(dev);
snprintf(str, str_len, "%u.%u",
irdma_fw_major_ver(&iwdev->rf->sc_dev),
irdma_fw_minor_ver(&iwdev->rf->sc_dev));
irdma_add_gid(struct ib_device *device,
const union ib_gid *gid,
const struct ib_gid_attr *attr,
irdma_del_gid(struct ib_device *device,
 * irdma_alloc_mr - register stag for fast memory registration
 * @mr_type: memory for stag registration
 * @max_num_sg: max number of pages
irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata)
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_pble_alloc *palloc;
struct irdma_pbl *iwpbl;
struct irdma_mr *iwmr;
int err_code = -ENOMEM;
iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
return ERR_PTR(-ENOMEM);
stag = irdma_create_stag(iwdev);
iwmr->ibmr.rkey = stag;
iwmr->ibmr.lkey = stag;
iwmr->ibmr.device = pd->device;
iwpbl = &iwmr->iwpbl;
iwmr->type = IRDMA_MEMREG_TYPE_MEM;
palloc = &iwpbl->pble_alloc;
iwmr->page_cnt = max_num_sg;
status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
err_code = irdma_hw_alloc_stag(iwdev, iwmr);
iwpbl->pbl_allocated = true;
irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
irdma_free_stag(iwdev, stag);
return ERR_PTR(err_code);
 * irdma_alloc_ucontext - Allocate the user context data structure
 * This keeps track of all objects associated with a particular
irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
struct ib_device *ibdev = uctx->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_alloc_ucontext_req req;
struct irdma_alloc_ucontext_resp uresp = {0};
struct irdma_ucontext *ucontext = to_ucontext(uctx);
struct irdma_uk_attrs *uk_attrs;
if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
ucontext->iwdev = iwdev;
ucontext->abi_ver = req.userspace_ver;
uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
/* GEN_1 support for libi40iw */
if (udata->outlen < sizeof(uresp)) {
if (uk_attrs->hw_rev != IRDMA_GEN_1)
ucontext->legacy_mode = true;
uresp.max_qps = iwdev->rf->max_qp;
uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
uresp.kernel_ver = req.userspace_ver;
if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)))
(uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
ucontext->db_mmap_entry =
irdma_user_mmap_entry_insert(ucontext, bar_off,
if (!ucontext->db_mmap_entry) {
uresp.kernel_ver = IRDMA_ABI_VER;
uresp.feature_flags = uk_attrs->feature_flags;
uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
uresp.max_hw_inline = uk_attrs->max_hw_inline;
uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
uresp.hw_rev = uk_attrs->hw_rev;
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen))) {
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
spin_lock_init(&ucontext->cq_reg_mem_list_lock);
INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
spin_lock_init(&ucontext->qp_reg_mem_list_lock);
INIT_LIST_HEAD(&ucontext->vma_list);
mutex_init(&ucontext->vma_list_mutex);
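/*
 * The lists and locks initialized above track this context's CQ and QP
 * registration memory and its mmapped VMA regions.
 */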
irdma_dev_err(&iwdev->rf->sc_dev,
"Invalid userspace driver version detected. Detected version %d, should be %d\n",
req.userspace_ver, IRDMA_ABI_VER);
 * irdma_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
irdma_dealloc_ucontext(struct ib_ucontext *context)
struct irdma_ucontext *ucontext = to_ucontext(context);
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
 * irdma_alloc_pd - allocate protection domain
 * @pd: protection domain
irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
struct irdma_pd *iwpd = to_iwpd(pd);
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_alloc_pd_resp uresp = {0};
struct irdma_sc_pd *sc_pd;
err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
sc_pd = &iwpd->sc_pd;
struct irdma_ucontext *ucontext =
rdma_udata_to_drv_context(udata, struct irdma_ucontext,
irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen))) {
irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device);
irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
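/**
 * irdma_fill_ah_info - fill AH network info from the resolved GID pair
 * @ah_info: AH info to populate
 * @sgid_attr: source GID attributes
 * @sgid_addr: source address derived from the SGID
 * @dgid_addr: destination address derived from the DGID
 * @dmac: destination MAC, rewritten for multicast destinations
 * @net_type: RDMA network type (IPv4 or IPv6)
 */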
irdma_fill_ah_info(struct irdma_ah_info *ah_info,
const struct ib_gid_attr *sgid_attr,
struct sockaddr *sgid_addr, struct sockaddr *dgid_addr,
u8 *dmac, u8 net_type)
if (net_type == RDMA_NETWORK_IPV4) {
ah_info->ipv4_valid = true;
ah_info->dest_ip_addr[0] =
ntohl(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr);
ah_info->src_ip_addr[0] =
ntohl(((struct sockaddr_in *)sgid_addr)->sin_addr.s_addr);
ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
ah_info->dest_ip_addr[0]);
if (ipv4_is_multicast(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr)) {
irdma_mcast_mac_v4(ah_info->dest_ip_addr, dmac);
irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
((struct sockaddr_in6 *)dgid_addr)->sin6_addr.__u6_addr.__u6_addr32);
irdma_copy_ip_ntohl(ah_info->src_ip_addr,
((struct sockaddr_in6 *)sgid_addr)->sin6_addr.__u6_addr.__u6_addr32);
ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
ah_info->dest_ip_addr);
if (rdma_is_multicast_addr(&((struct sockaddr_in6 *)dgid_addr)->sin6_addr)) {
irdma_mcast_mac_v6(ah_info->dest_ip_addr, dmac);
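/**
 * irdma_create_ah_vlan_tag - set the AH VLAN tag and resolve the ARP entry
 * @iwdev: irdma device
 * @ah_info: AH info to update
 * @sgid_attr: source GID attributes, supplies the VLAN netdev if present
 * @dmac: destination MAC used to look up the ARP cache index
 */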
irdma_create_ah_vlan_tag(struct irdma_device *iwdev,
struct irdma_ah_info *ah_info,
const struct ib_gid_attr *sgid_attr,
if (sgid_attr->ndev && is_vlan_dev(sgid_attr->ndev))
ah_info->vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev);
ah_info->vlan_tag = VLAN_N_VID;
ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr, dmac);
if (ah_info->dst_arpindex == -1)
if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
ah_info->vlan_tag = 0;
if (ah_info->vlan_tag < VLAN_N_VID) {
ah_info->insert_vlan_tag = true;
rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
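/**
 * irdma_create_ah_wait - wait for a CQP AH create to complete
 * @rf: RDMA PCI function
 * @sc_ah: AH being created
 * @sleep: false when the caller cannot block; the CCQ is polled instead
 */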
irdma_create_ah_wait(struct irdma_pci_f *rf,
struct irdma_sc_ah *sc_ah, bool sleep)
int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
} while (!sc_ah->ah_info.ah_valid && --cnt);
 * irdma_create_ah - create address handle
 * @attr: address handle attributes
 * @flags: AH create flags, indicate whether the call may sleep
 * returns 0 on success, error otherwise
irdma_create_ah(struct ib_ah *ib_ah,
struct ib_ah_attr *attr, u32 flags,
struct ib_udata *udata)
struct irdma_pd *pd = to_iwpd(ib_ah->pd);
struct irdma_ah *ah = container_of(ib_ah, struct irdma_ah, ibah);
struct irdma_device *iwdev = to_iwdev(ib_ah->pd->device);
struct ib_gid_attr sgid_attr;
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_sc_ah *sc_ah;
struct irdma_ah_info *ah_info;
struct irdma_create_ah_resp uresp;
struct sockaddr saddr;
struct sockaddr_in saddr_in;
struct sockaddr_in6 saddr_in6;
} sgid_addr, dgid_addr;
err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
rf->max_ah, &ah_id, &rf->next_ah);
sc_ah->ah_info.ah_idx = ah_id;
sc_ah->ah_info.vsi = &iwdev->vsi;
irdma_sc_init_ah(&rf->sc_dev, sc_ah);
ah->sgid_index = attr->grh.sgid_index;
memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid));
err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num,
attr->grh.sgid_index, &sgid, &sgid_attr);
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"GID lookup at idx=%d with port=%d failed\n",
attr->grh.sgid_index, attr->port_num);
rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
ah->av.attrs = *attr;
ah->av.net_type = kc_rdma_gid_attr_network_type(sgid_attr,
dev_put(sgid_attr.ndev);
ah->av.sgid_addr.saddr = sgid_addr.saddr;
ah->av.dgid_addr.saddr = dgid_addr.saddr;
ah_info = &sc_ah->ah_info;
ah_info->ah_idx = ah_id;
ah_info->pd_idx = pd->sc_pd.pd_id;
ether_addr_copy(ah_info->mac_addr, IF_LLADDR(iwdev->netdev));
if (attr->ah_flags & IB_AH_GRH) {
ah_info->flow_label = attr->grh.flow_label;
ah_info->hop_ttl = attr->grh.hop_limit;
ah_info->tc_tos = attr->grh.traffic_class;
ether_addr_copy(dmac, attr->dmac);
irdma_fill_ah_info(ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
dmac, ah->av.net_type);
err = irdma_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr, dmac);
sleep = flags & RDMA_CREATE_AH_SLEEPABLE;
err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"CQP-OP Create AH fail");
err = irdma_create_ah_wait(rf, sc_ah, sleep);
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"CQP create AH timed out");
uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
err = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr)
ether_addr_copy(dmac, attr->dmac);
irdma_create_ah_stub(struct ib_ah *ib_ah,
struct ib_ah_attr *attr, u32 flags,
struct ib_udata *udata)
irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags)
 * irdma_free_qp_rsrc - free up memory resources for qp
 * @iwqp: qp ptr (user or kernel)
irdma_free_qp_rsrc(struct irdma_qp *iwqp)
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_pci_f *rf = iwdev->rf;
u32 qp_num = iwqp->ibqp.qp_num;
irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
irdma_dealloc_push_page(rf, &iwqp->sc_qp);
if (iwqp->sc_qp.vsi) {
irdma_qp_rem_qos(&iwqp->sc_qp);
iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
iwqp->sc_qp.user_pri);
irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);
kfree(iwqp->kqp.sig_trk_mem);
iwqp->kqp.sig_trk_mem = NULL;
kfree(iwqp->kqp.sq_wrid_mem);
kfree(iwqp->kqp.rq_wrid_mem);
kfree(iwqp->sg_list);
 * irdma_create_qp - create qp
 * @init_attr: attributes for qp
 * @udata: user data for create qp
irdma_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_qp *iwqp;
struct irdma_create_qp_req req;
struct irdma_create_qp_resp uresp = {0};
struct irdma_sc_qp *qp;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
struct irdma_qp_init_info init_info = {{0}};
struct irdma_qp_host_ctx_info *ctx_info;
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
return ERR_PTR(err_code);
sq_size = init_attr->cap.max_send_wr;
rq_size = init_attr->cap.max_recv_wr;
init_info.vsi = &iwdev->vsi;
init_info.qp_uk_init_info.uk_attrs = uk_attrs;
init_info.qp_uk_init_info.sq_size = sq_size;
init_info.qp_uk_init_info.rq_size = rq_size;
init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
return ERR_PTR(-ENOMEM);
iwqp->sg_list = kcalloc(uk_attrs->max_hw_wq_frags, sizeof(*iwqp->sg_list),
if (!iwqp->sg_list) {
return ERR_PTR(-ENOMEM);
qp->qp_uk.back_qp = iwqp;
qp->qp_uk.lock = &iwqp->lock;
qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
iwqp->q2_ctx_mem.size = IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE;
iwqp->q2_ctx_mem.va = irdma_allocate_dma_mem(dev->hw, &iwqp->q2_ctx_mem,
iwqp->q2_ctx_mem.size,
if (!iwqp->q2_ctx_mem.va) {
kfree(iwqp->sg_list);
return ERR_PTR(-ENOMEM);
init_info.q2 = iwqp->q2_ctx_mem.va;
init_info.q2_pa = iwqp->q2_ctx_mem.pa;
init_info.host_ctx = (__le64 *) (init_info.q2 + IRDMA_Q2_BUF_SIZE);
init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
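/*
 * The QP host context is carved out of the same DMA allocation as the
 * Q2 buffer, starting IRDMA_Q2_BUF_SIZE bytes in.
 */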
if (init_attr->qp_type == IB_QPT_GSI)
err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
&qp_num, &rf->next_qp);
iwqp->ibqp.qp_num = qp_num;
iwqp->iwscq = to_iwcq(init_attr->send_cq);
iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
iwqp->host_ctx.va = init_info.host_ctx;
iwqp->host_ctx.pa = init_info.host_ctx_pa;
iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
init_info.pd = &iwpd->sc_pd;
init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
if (!rdma_protocol_roce(&iwdev->ibdev, 1))
init_info.qp_uk_init_info.first_sq_wq = 1;
iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
init_waitqueue_head(&iwqp->waitq);
init_waitqueue_head(&iwqp->mod_qp_waitq);
err_code = ib_copy_from_udata(&req, udata,
min(sizeof(req), udata->inlen));
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"ib_copy_from_udata fail\n"
iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
if (req.user_wqe_bufs) {
struct irdma_ucontext *ucontext = to_ucontext(ibpd->uobject->context);
init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
&ucontext->qp_reg_mem_list);
spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
irdma_debug(iwdev_to_idev(iwdev),
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
irdma_setup_virt_qp(iwdev, iwqp, &init_info);
INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"setup qp failed\n"
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
if (init_attr->qp_type == IB_QPT_RC) {
init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
IRDMA_WRITE_WITH_IMM |
init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
ret = irdma_sc_qp_init(qp, &init_info);
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
ctx_info = &iwqp->ctx_info;
ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
if (rdma_protocol_roce(&iwdev->ibdev, 1))
irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
err_code = irdma_cqp_create_qp_cmd(iwqp);
atomic_set(&iwqp->refcnt, 1);
spin_lock_init(&iwqp->lock);
spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
rf->qp_table[qp_num] = iwqp;
iwqp->max_send_wr = sq_size;
iwqp->max_recv_wr = rq_size;
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
if (dev->ws_add(&iwdev->vsi, 0)) {
irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
irdma_qp_add_qos(&iwqp->sc_qp);
/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
if (udata->outlen < sizeof(uresp)) {
uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
uresp.actual_sq_size = sq_size;
uresp.actual_rq_size = rq_size;
uresp.qp_id = qp_num;
uresp.qp_caps = qp->qp_uk.qp_caps;
err_code = ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen));
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"copy_to_udata failed\n"
kc_irdma_destroy_qp(&iwqp->ibqp, udata);
return ERR_PTR(err_code);
init_completion(&iwqp->free_qp);
irdma_free_qp_rsrc(iwqp);
return ERR_PTR(err_code);
 * irdma_destroy_qp - destroy qp
 * @ibqp: qp's ib pointer also to get to device's qp address
irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
if (iwqp->sc_qp.qp_uk.destroy_pending)
iwqp->sc_qp.qp_uk.destroy_pending = true;
if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
irdma_modify_qp_to_err(&iwqp->sc_qp);
if (!iwqp->user_mode)
cancel_delayed_work_sync(&iwqp->dwork_flush);
irdma_qp_rem_ref(&iwqp->ibqp);
wait_for_completion(&iwqp->free_qp);
irdma_free_lsmm_rsrc(iwqp);
if (!iwdev->rf->reset &&
irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp))
return -ENOTRECOVERABLE;
if (!iwqp->user_mode) {
irdma_clean_cqes(iwqp, iwqp->iwscq);
if (iwqp->iwrcq != iwqp->iwscq)
irdma_clean_cqes(iwqp, iwqp->iwrcq);
irdma_remove_push_mmap_entries(iwqp);
irdma_free_qp_rsrc(iwqp);
 * irdma_create_cq - create cq
 * @ibcq: CQ allocated
 * @attr: attributes for cq
irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
struct ib_device *ibdev = ibcq->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_cq *iwcq = to_iwcq(ibcq);
struct irdma_sc_cq *cq;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_cq_init_info info = {0};
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
int entries = attr->cqe;
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
atomic_set(&iwcq->refcnt, 1);
spin_lock_init(&iwcq->lock);
INIT_LIST_HEAD(&iwcq->resize_list);
INIT_LIST_HEAD(&iwcq->cmpl_generated);
ukinfo->cq_size = max(entries, 4);
ukinfo->cq_id = cq_num;
iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
if (attr->comp_vector < rf->ceqs_count)
info.ceq_id = attr->comp_vector;
info.ceq_id_valid = true;
info.type = IRDMA_CQ_TYPE_IWARP;
info.vsi = &iwdev->vsi;
struct irdma_ucontext *ucontext;
struct irdma_create_cq_req req = {0};
struct irdma_cq_mr *cqmr;
struct irdma_pbl *iwpbl;
struct irdma_pbl *iwpbl_shadow;
struct irdma_cq_mr *cqmr_shadow;
iwcq->user_mode = true;
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
if (ib_copy_from_udata(&req, udata,
min(sizeof(req), udata->inlen))) {
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
&ucontext->cq_reg_mem_list);
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
iwcq->cq_mem_size = 0;
cqmr = &iwpbl->cq_mr;
if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
iwpbl_shadow = irdma_get_pbl((unsigned long)req.user_shadow_area,
&ucontext->cq_reg_mem_list);
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
iwcq->iwpbl_shadow = iwpbl_shadow;
cqmr_shadow = &iwpbl_shadow->cq_mr;
info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
info.shadow_area_pa = cqmr->shadow;
if (iwpbl->pbl_allocated) {
info.virtual_map = true;
info.pbl_chunk_size = 1;
info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
info.cq_base_pa = cqmr->cq_pbl.addr;
/* Kmode allocations */
if (entries < 1 || entries > rf->max_cqe) {
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
ukinfo->cq_size = entries;
rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
iwcq->kmem.size = round_up(rsize, 256);
iwcq->kmem.va = irdma_allocate_dma_mem(dev->hw, &iwcq->kmem,
iwcq->kmem.size, 256);
if (!iwcq->kmem.va) {
iwcq->kmem_shadow.size = IRDMA_SHADOW_AREA_SIZE << 3;
iwcq->kmem_shadow.va = irdma_allocate_dma_mem(dev->hw,
iwcq->kmem_shadow.size,
if (!iwcq->kmem_shadow.va) {
info.shadow_area_pa = iwcq->kmem_shadow.pa;
ukinfo->shadow_area = iwcq->kmem_shadow.va;
ukinfo->cq_base = iwcq->kmem.va;
info.cq_base_pa = iwcq->kmem.pa;
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
(u32)IRDMA_MAX_CQ_READ_THRESH);
if (irdma_sc_cq_init(cq, &info)) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
cqp_info = &cqp_request->info;
cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
cqp_info->post_sq = 1;
cqp_info->in.u.cq_create.cq = cq;
cqp_info->in.u.cq_create.check_overflow = true;
cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
status = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
struct irdma_create_cq_resp resp = {0};
resp.cq_id = info.cq_uk_init_info.cq_id;
resp.cq_size = info.cq_uk_init_info.cq_size;
if (ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen))) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"copy to user data\n"
rf->cq_table[cq_num] = iwcq;
init_completion(&iwcq->free_cq);
irdma_cq_wq_destroy(rf, cq);
irdma_cq_free_rsrc(rf, iwcq);
 * irdma_copy_user_pgaddrs - copy user page addresses into the local pble list
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: pble pointer to save 1 level or 0 level pble
 * @level: indicates level 0, 1 or 2
irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
enum irdma_pble_level level)
struct ib_umem *region = iwmr->region;
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
int chunk_pages, entry, i;
struct scatterlist *sg;
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_pble_info *pinfo;
pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
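/*
 * For a level-1 pble the page addresses are written straight into the
 * single pble buffer; for level-2, irdma_next_pbl_addr() walks pinfo
 * through the leaf chunks as each one fills.
 */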
for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
chunk_pages = DIV_ROUND_UP(sg_dma_len(sg), iwmr->page_size);
if (iwmr->type == IRDMA_MEMREG_TYPE_QP && !iwpbl->qp_mr.sq_page)
iwpbl->qp_mr.sq_page = sg_page(sg);
for (i = 0; i < chunk_pages; i++) {
pg_addr = sg_dma_address(sg) + (i * iwmr->page_size);
if ((entry + i) == 0)
*pbl = pg_addr & iwmr->page_msk;
else if (!(pg_addr & ~iwmr->page_msk))
if (++pbl_cnt == palloc->total_cnt)
pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
 * irdma_destroy_ah - Destroy address handle
 * @ibah: pointer to address handle
 * @ah_flags: destroy flags
irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
struct irdma_device *iwdev = to_iwdev(ibah->device);
struct irdma_ah *ah = to_iwah(ibah);
irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
ah->sc_ah.ah_info.ah_idx);
irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
struct ib_pd *ibpd = ib_mr->pd;
struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_mr *iwmr = to_iwmr(ib_mr);
struct irdma_device *iwdev = to_iwdev(ib_mr->device);
struct irdma_dealloc_stag_info *info;
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
struct irdma_ucontext *ucontext;
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
irdma_del_memlist(iwmr, ucontext);
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.dealloc_stag.info;
memset(info, 0, sizeof(*info));
info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
info->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S);
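/* The stag index is recovered from the MR's rkey for the dealloc-stag CQP request. */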
if (iwpbl->pbl_allocated)
info->dealloc_pbl = true;
cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
cqp_info->post_sq = 1;
cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
irdma_free_stag(iwdev, iwmr->stag);
if (iwpbl->pbl_allocated)
irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
ib_umem_release(iwmr->region);
kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
struct ib_gid_attr sgid_attr;
struct irdma_av *av = &iwqp->roce_ah.av;
ret = ib_get_cached_gid(iwqp->ibqp.device, attr->ah_attr.port_num,
attr->ah_attr.grh.sgid_index, &sgid,
if (sgid_attr.ndev) {
*vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, IF_LLADDR(sgid_attr.ndev));
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid);
dev_put(sgid_attr.ndev);
 * irdma_destroy_cq - destroy cq
 * @ib_cq: cq pointer
irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
struct irdma_device *iwdev = to_iwdev(ib_cq->device);
struct irdma_cq *iwcq = to_iwcq(ib_cq);
struct irdma_sc_cq *cq = &iwcq->sc_cq;
struct irdma_sc_dev *dev = cq->dev;
struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
unsigned long flags;
spin_lock_irqsave(&iwcq->lock, flags);
if (!list_empty(&iwcq->cmpl_generated))
irdma_remove_cmpls_list(iwcq);
if (!list_empty(&iwcq->resize_list))
irdma_process_resize_list(iwcq, iwdev, NULL);
spin_unlock_irqrestore(&iwcq->lock, flags);
irdma_cq_rem_ref(ib_cq);
wait_for_completion(&iwcq->free_cq);
irdma_cq_wq_destroy(iwdev->rf, cq);
irdma_cq_free_rsrc(iwdev->rf, iwcq);
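/* Purge any CEQ entries that still reference this CQ, under the CEQ lock. */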
spin_lock_irqsave(&iwceq->ce_lock, flags);
irdma_sc_cleanup_ceqes(cq, ceq);
spin_unlock_irqrestore(&iwceq->ce_lock, flags);
 * irdma_alloc_mw - Allocate memory window
 * @pd: Protection domain
 * @type: Window type
 * @udata: user data pointer
irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata)
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_mr *iwmr;
iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
return ERR_PTR(-ENOMEM);
stag = irdma_create_stag(iwdev);
return ERR_PTR(-ENOMEM);
iwmr->ibmw.rkey = stag;
iwmr->ibmw.type = type;
iwmr->ibmw.device = pd->device;
err_code = irdma_hw_alloc_mw(iwdev, iwmr);
irdma_free_stag(iwdev, stag);
return ERR_PTR(err_code);
 * kc_set_loc_seq_num_mss - Set local seq number and mss
 * @cm_node: cm node info
kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node)
cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
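/*
 * Clamp the MSS to a 1500-byte MTU when the interface MTU is larger but
 * the receive window cannot cover two full-MTU segments.
 */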
if (cm_node->iwdev->vsi.mtu > 1500 &&
2 * cm_node->iwdev->vsi.mtu > cm_node->iwdev->rcv_wnd)
cm_node->tcp_cntxt.mss = (cm_node->ipv4) ?
(1500 - IRDMA_MTU_TO_MSS_IPV4) :
(1500 - IRDMA_MTU_TO_MSS_IPV6);
cm_node->tcp_cntxt.mss = (cm_node->ipv4) ?
(cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4) :
(cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6);
 * irdma_disassociate_ucontext - Disassociate user context
 * @context: ib user context
irdma_disassociate_ucontext(struct ib_ucontext *context)
ib_device_get_by_netdev(struct ifnet *netdev, int driver_id)
struct irdma_device *iwdev;
struct irdma_handler *hdl;
unsigned long flags;
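/*
 * Walk the registered irdma handlers under irdma_handler_lock and return
 * the ib_device whose backing netdev matches.
 */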
spin_lock_irqsave(&irdma_handler_lock, flags);
list_for_each_entry(hdl, &irdma_handlers, list) {
if (netdev == iwdev->netdev) {
spin_unlock_irqrestore(&irdma_handler_lock,
return &iwdev->ibdev;
spin_unlock_irqrestore(&irdma_handler_lock, flags);
ib_unregister_device_put(struct ib_device *device)
ib_unregister_device(device);
 * irdma_query_gid_roce - Query port GID for RoCE
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
irdma_query_gid_roce(struct ib_device *ibdev, u8 port, int index,
ret = rdma_query_gid(ibdev, port, index, gid);
if (ret == -EAGAIN) {
memcpy(gid, &zgid, sizeof(*gid));
 * irdma_modify_port - modify port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @mask: Property mask
 * @props: returning device attributes
irdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
struct ib_port_modify *props)
 * irdma_query_pkey - Query partition key
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: index of pkey
 * @pkey: pointer to store the pkey
irdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
if (index >= IRDMA_PKEY_TBL_SZ)
*pkey = IRDMA_DEFAULT_PKEY;
irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable)
struct ib_port_attr attr;
immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
err = ib_query_port(ibdev, port_num, &attr);
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
irdma_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable)
struct ib_port_attr attr;
immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
err = ib_query_port(ibdev, port_num, &attr);
immutable->gid_tbl_len = 1;
 * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed
 * @link_speed: netdev phy link speed
 * @active_speed: IB port speed
 * @active_width: IB port width
irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed,
if (link_speed <= SPEED_1000) {
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_SDR;
} else if (link_speed <= SPEED_10000) {
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_FDR10;
} else if (link_speed <= SPEED_20000) {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_DDR;
} else if (link_speed <= SPEED_25000) {
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_EDR;
} else if (link_speed <= SPEED_40000) {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_FDR10;
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_EDR;
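/* Anything faster than 40G is reported as 4X EDR, the fastest combination used here. */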
 * irdma_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returning device attributes
irdma_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
struct irdma_device *iwdev = to_iwdev(ibdev);
struct ifnet *netdev = iwdev->netdev;
/* no need to zero out props here. done by caller */
props->max_mtu = IB_MTU_4096;
props->active_mtu = ib_mtu_int_to_enum(netdev->if_mtu);
if ((netdev->if_link_state == LINK_STATE_UP) && (netdev->if_drv_flags & IFF_DRV_RUNNING)) {
props->state = IB_PORT_ACTIVE;
props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
props->state = IB_PORT_DOWN;
props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
&props->active_width);
if (rdma_protocol_roce(ibdev, 1)) {
props->gid_tbl_len = 32;
kc_set_props_ip_gid_caps(props);
props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
props->gid_tbl_len = 1;
props->qkey_viol_cntr = 0;
props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
extern const char *const irdma_hw_stat_names[];
 * irdma_alloc_hw_stats - Allocate a hw stats structure
 * @ibdev: device pointer from stack
 * @port_num: port number
struct rdma_hw_stats *
irdma_alloc_hw_stats(struct ib_device *ibdev,
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
int num_counters = dev->hw_attrs.max_stat_idx;
unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
 * irdma_get_hw_stats - Populates the rdma_hw_stats structure
 * @ibdev: device pointer from stack
 * @stats: stats pointer from stack
 * @port_num: port number
 * @index: which hw counter the stack is requesting we update
irdma_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats, u8 port_num,
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
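/*
 * The hw_stats structure is copied wholesale into stats->value; this
 * assumes its layout matches the order of irdma_hw_stat_names.
 */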
memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);
return stats->num_counters;
 * irdma_query_gid - Query port GID
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
irdma_query_gid(struct ib_device *ibdev, u8 port, int index,
struct irdma_device *iwdev = to_iwdev(ibdev);
memset(gid->raw, 0, sizeof(gid->raw));
ether_addr_copy(gid->raw, IF_LLADDR(iwdev->netdev));
enum rdma_link_layer
irdma_get_link_layer(struct ib_device *ibdev,
return IB_LINK_LAYER_ETHERNET;
ib_mtu_int_to_enum(int mtu)
else if (mtu >= 2048)
else if (mtu >= 1024)
else if (mtu >= 512)
kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev)
iwdev->ibdev.uverbs_cmd_mask |=
BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST);
kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
iwdev->ibdev.uverbs_cmd_mask =
BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ) |
BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ) |
BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
BIT_ULL(IB_USER_VERBS_CMD_BIND_MW) |
BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
BIT_ULL(IB_USER_VERBS_CMD_POST_RECV) |
BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
iwdev->ibdev.uverbs_ex_cmd_mask =
BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_QP) |
BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
iwdev->ibdev.uverbs_ex_cmd_mask |= BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ);