2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
4 * Copyright (c) 2015 - 2022 Intel Corporation
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenFabrics.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include "irdma_main.h"
39 * irdma_query_device - get device attributes
40 * @ibdev: device pointer from stack
41 * @props: returning device attributes
45 irdma_query_device(struct ib_device *ibdev,
46 struct ib_device_attr *props,
47 struct ib_udata *udata)
49 struct irdma_device *iwdev = to_iwdev(ibdev);
50 struct irdma_pci_f *rf = iwdev->rf;
51 struct pci_dev *pcidev = iwdev->rf->pcidev;
52 struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
54 if (udata->inlen || udata->outlen)
57 memset(props, 0, sizeof(*props));
58 ether_addr_copy((u8 *)&props->sys_image_guid, IF_LLADDR(iwdev->netdev));
59 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
60 irdma_fw_minor_ver(&rf->sc_dev);
61 props->device_cap_flags = iwdev->device_cap_flags;
62 props->vendor_id = pcidev->vendor;
63 props->vendor_part_id = pcidev->device;
64 props->hw_ver = pcidev->revision;
65 props->page_size_cap = SZ_4K | SZ_2M | SZ_1G;
66 props->max_mr_size = hw_attrs->max_mr_size;
67 props->max_qp = rf->max_qp - rf->used_qps;
68 props->max_qp_wr = hw_attrs->max_qp_wr;
69 set_max_sge(props, rf);
70 props->max_cq = rf->max_cq - rf->used_cqs;
71 props->max_cqe = rf->max_cqe;
72 props->max_mr = rf->max_mr - rf->used_mrs;
73 props->max_mw = props->max_mr;
74 props->max_pd = rf->max_pd - rf->used_pds;
75 props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
76 props->max_qp_rd_atom = hw_attrs->max_hw_ird;
77 props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
78 if (rdma_protocol_roce(ibdev, 1))
79 props->max_pkeys = IRDMA_PKEY_TBL_SZ;
80 props->max_ah = rf->max_ah;
81 props->max_mcast_grp = rf->max_mcg;
82 props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
83 props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
84 props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
85 #define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
86 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
87 props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
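/*
 * Note: the max_qp/max_cq/max_mr/max_pd values reported above are the
 * resources still available (max minus currently used), so repeated
 * queries can return different numbers as resources are consumed.
 *
 * Illustrative consumer-side usage (libibverbs, not part of this driver):
 *
 *	struct ibv_device_attr dattr;
 *
 *	if (!ibv_query_device(ctx, &dattr))
 *		printf("max_qp=%d max_mr_size=%llu\n", dattr.max_qp,
 *		       (unsigned long long)dattr.max_mr_size);
 */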
93 irdma_mmap_legacy(struct irdma_ucontext *ucontext,
94 struct vm_area_struct *vma)
98 if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
101 vma->vm_private_data = ucontext;
102 pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
103 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
105 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
106 pgprot_noncached(vma->vm_page_prot), NULL);
110 irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
112 struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
117 struct rdma_user_mmap_entry *
118 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
119 enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
121 struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
127 entry->bar_offset = bar_offset;
128 entry->mmap_flag = mmap_flag;
130 ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
131 &entry->rdma_entry, PAGE_SIZE);
136 *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
138 return &entry->rdma_entry;
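/*
 * The value returned through @mmap_offset is what user space passes back as
 * the mmap() offset; irdma_mmap() later resolves it with
 * rdma_user_mmap_entry_get() to recover the bar_offset and mmap_flag
 * recorded here.
 */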
142 * irdma_mmap - user memory map
143 * @context: context created during alloc
144 * @vma: kernel info for user memory map
147 irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
149 struct rdma_user_mmap_entry *rdma_entry;
150 struct irdma_user_mmap_entry *entry;
151 struct irdma_ucontext *ucontext;
155 ucontext = to_ucontext(context);
157 /* Legacy support for libi40iw with hard-coded mmap key */
158 if (ucontext->legacy_mode)
159 return irdma_mmap_legacy(ucontext, vma);
161 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
163 irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
164 "pgoff[0x%lx] does not have valid entry\n",
169 entry = to_irdma_mmap_entry(rdma_entry);
170 irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
171 "bar_offset [0x%lx] mmap_flag [%d]\n", entry->bar_offset,
174 pfn = (entry->bar_offset +
175 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
177 switch (entry->mmap_flag) {
178 case IRDMA_MMAP_IO_NC:
179 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
180 pgprot_noncached(vma->vm_page_prot),
183 case IRDMA_MMAP_IO_WC:
184 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
185 pgprot_writecombine(vma->vm_page_prot),
193 irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
194 "bar_offset [0x%lx] mmap_flag[%d] err[%d]\n",
195 entry->bar_offset, entry->mmap_flag, ret);
196 rdma_user_mmap_entry_put(rdma_entry);
202 * irdma_alloc_push_page - allocate a push page for qp
206 irdma_alloc_push_page(struct irdma_qp *iwqp)
208 struct irdma_cqp_request *cqp_request;
209 struct cqp_cmds_info *cqp_info;
210 struct irdma_device *iwdev = iwqp->iwdev;
211 struct irdma_sc_qp *qp = &iwqp->sc_qp;
214 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
218 cqp_info = &cqp_request->info;
219 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
220 cqp_info->post_sq = 1;
221 cqp_info->in.u.manage_push_page.info.push_idx = 0;
222 cqp_info->in.u.manage_push_page.info.qs_handle =
223 qp->vsi->qos[qp->user_pri].qs_handle;
224 cqp_info->in.u.manage_push_page.info.free_page = 0;
225 cqp_info->in.u.manage_push_page.info.push_page_type = 0;
226 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
227 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
229 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
230 if (!status && cqp_request->compl_info.op_ret_val <
231 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
232 qp->push_idx = cqp_request->compl_info.op_ret_val;
236 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
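/*
 * On success the CQP completion's op_ret_val carries the push page index
 * that was bound to this QP's qs_handle; it is recorded in qp->push_idx
 * only when it is below max_hw_device_pages, otherwise the QP continues to
 * use its regular doorbell path.
 */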
240 * irdma_get_pbl - Retrieve pbl from a list given a virtual address
242 * @va: user virtual address
243 * @pbl_list: pbl list to search in (QP's or CQ's)
246 irdma_get_pbl(unsigned long va,
247 struct list_head *pbl_list)
249 struct irdma_pbl *iwpbl;
251 list_for_each_entry(iwpbl, pbl_list, list) {
252 if (iwpbl->user_base == va) {
253 list_del(&iwpbl->list);
254 iwpbl->on_list = false;
263 * irdma_clean_cqes - clean cq entries for qp
264 * @iwqp: qp ptr (user or kernel)
268 irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
270 struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
273 spin_lock_irqsave(&iwcq->lock, flags);
274 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
275 spin_unlock_irqrestore(&iwcq->lock, flags);
279 irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
281 if (iwqp->push_db_mmap_entry) {
282 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
283 iwqp->push_db_mmap_entry = NULL;
285 if (iwqp->push_wqe_mmap_entry) {
286 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
287 iwqp->push_wqe_mmap_entry = NULL;
292 irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
293 struct irdma_qp *iwqp,
294 u64 *push_wqe_mmap_key,
295 u64 *push_db_mmap_key)
297 struct irdma_device *iwdev = ucontext->iwdev;
300 WARN_ON_ONCE(iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_2);
302 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
304 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
305 /* skip over db page */
306 bar_off += IRDMA_HW_PAGE_SIZE;
307 /* skip over reserved space */
308 bar_off += IRDMA_PF_BAR_RSVD;
312 bar_off += iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
313 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
314 bar_off, IRDMA_MMAP_IO_WC,
316 if (!iwqp->push_wqe_mmap_entry)
319 /* push doorbell page */
320 bar_off += IRDMA_HW_PAGE_SIZE;
321 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
322 bar_off, IRDMA_MMAP_IO_NC,
324 if (!iwqp->push_db_mmap_entry) {
325 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
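/*
 * Assumed GEN_2 BAR layout for the code above: the doorbell page, a
 * reserved region (IRDMA_PF_BAR_RSVD), then per-QP push pages selected by
 * push_idx; the page immediately after the push WQE page is the matching
 * push doorbell page. The two mmap entries expose those pages to user
 * space as write-combined and non-cached mappings respectively.
 */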
333 * irdma_setup_virt_qp - setup for allocation of virtual qp
334 * @iwdev: irdma device
336 * @init_info: initialize info to return
339 irdma_setup_virt_qp(struct irdma_device *iwdev,
340 struct irdma_qp *iwqp,
341 struct irdma_qp_init_info *init_info)
343 struct irdma_pbl *iwpbl = iwqp->iwpbl;
344 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
346 iwqp->page = qpmr->sq_page;
347 init_info->shadow_area_pa = qpmr->shadow;
348 if (iwpbl->pbl_allocated) {
349 init_info->virtual_map = true;
350 init_info->sq_pa = qpmr->sq_pbl.idx;
351 init_info->rq_pa = qpmr->rq_pbl.idx;
353 init_info->sq_pa = qpmr->sq_pbl.addr;
354 init_info->rq_pa = qpmr->rq_pbl.addr;
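/*
 * For user-mode QPs the SQ/RQ buffers live in user memory: when a PBL was
 * allocated, virtual_map is set and the hardware is given PBL indices;
 * otherwise the physically contiguous buffer addresses are programmed
 * directly.
 */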
359 * irdma_setup_kmode_qp - setup initialization for kernel mode qp
360 * @iwdev: iwarp device
361 * @iwqp: qp ptr (user or kernel)
362 * @info: initialize info to return
363 * @init_attr: Initial QP create attributes
366 irdma_setup_kmode_qp(struct irdma_device *iwdev,
367 struct irdma_qp *iwqp,
368 struct irdma_qp_init_info *info,
369 struct ib_qp_init_attr *init_attr)
371 struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
372 u32 sqdepth, rqdepth;
376 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
377 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
379 irdma_get_wqe_shift(uk_attrs,
380 uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
381 ukinfo->max_sq_frag_cnt,
382 ukinfo->max_inline_data, &sqshift);
383 status = irdma_get_sqdepth(uk_attrs->max_hw_wq_quanta, ukinfo->sq_size,
388 if (uk_attrs->hw_rev == IRDMA_GEN_1)
389 rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
391 irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
394 status = irdma_get_rqdepth(uk_attrs->max_hw_rq_quanta, ukinfo->rq_size,
399 iwqp->kqp.sq_wrid_mem =
400 kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
401 if (!iwqp->kqp.sq_wrid_mem)
404 iwqp->kqp.rq_wrid_mem =
405 kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
406 if (!iwqp->kqp.rq_wrid_mem) {
407 kfree(iwqp->kqp.sq_wrid_mem);
408 iwqp->kqp.sq_wrid_mem = NULL;
412 iwqp->kqp.sig_trk_mem = kcalloc(sqdepth, sizeof(u32), GFP_KERNEL);
414 if (!iwqp->kqp.sig_trk_mem) {
415 kfree(iwqp->kqp.sq_wrid_mem);
416 iwqp->kqp.sq_wrid_mem = NULL;
417 kfree(iwqp->kqp.rq_wrid_mem);
418 iwqp->kqp.rq_wrid_mem = NULL;
421 ukinfo->sq_sigwrtrk_array = (void *)iwqp->kqp.sig_trk_mem;
422 ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
423 ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
425 size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
426 size += (IRDMA_SHADOW_AREA_SIZE << 3);
429 mem->va = irdma_allocate_dma_mem(&iwdev->rf->hw, mem, mem->size,
432 kfree(iwqp->kqp.sq_wrid_mem);
433 iwqp->kqp.sq_wrid_mem = NULL;
434 kfree(iwqp->kqp.rq_wrid_mem);
435 iwqp->kqp.rq_wrid_mem = NULL;
439 ukinfo->sq = mem->va;
440 info->sq_pa = mem->pa;
441 ukinfo->rq = &ukinfo->sq[sqdepth];
442 info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
443 ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
444 info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
445 ukinfo->sq_size = sqdepth >> sqshift;
446 ukinfo->rq_size = rqdepth >> rqshift;
447 ukinfo->qp_id = iwqp->ibqp.qp_num;
449 init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
450 init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
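/*
 * sqdepth/rqdepth are in WQE quanta and already include the reserved
 * entries, while sqshift/rqshift give log2(quanta per WQE) for the
 * requested fragment/inline sizes. Shifting right converts back to WQE
 * counts, which is why the reported max_send_wr/max_recv_wr subtract
 * IRDMA_SQ_RSVD/IRDMA_RQ_RSVD first. For example (illustrative numbers
 * only): sqdepth = 256 quanta with sqshift = 1 (2 quanta per WQE) yields
 * sq_size = 128 WQEs.
 */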
456 irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
458 struct irdma_pci_f *rf = iwqp->iwdev->rf;
459 struct irdma_cqp_request *cqp_request;
460 struct cqp_cmds_info *cqp_info;
461 struct irdma_create_qp_info *qp_info;
464 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
468 cqp_info = &cqp_request->info;
469 qp_info = &cqp_request->info.in.u.qp_create.info;
470 memset(qp_info, 0, sizeof(*qp_info));
471 qp_info->mac_valid = true;
472 qp_info->cq_num_valid = true;
473 qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
475 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
476 cqp_info->post_sq = 1;
477 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
478 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
479 status = irdma_handle_cqp_op(rf, cqp_request);
480 irdma_put_cqp_request(&rf->cqp, cqp_request);
486 irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
487 struct irdma_qp_host_ctx_info *ctx_info)
489 struct irdma_device *iwdev = iwqp->iwdev;
490 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
491 struct irdma_roce_offload_info *roce_info;
492 struct irdma_udp_offload_info *udp_info;
494 udp_info = &iwqp->udp_info;
495 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
496 udp_info->cwnd = iwdev->roce_cwnd;
497 udp_info->rexmit_thresh = 2;
498 udp_info->rnr_nak_thresh = 2;
499 udp_info->src_port = 0xc000;
500 udp_info->dst_port = ROCE_V2_UDP_DPORT;
501 roce_info = &iwqp->roce_info;
502 ether_addr_copy(roce_info->mac_addr, IF_LLADDR(iwdev->netdev));
504 roce_info->rd_en = true;
505 roce_info->wr_rdresp_en = true;
506 roce_info->bind_en = true;
507 roce_info->dcqcn_en = false;
508 roce_info->rtomin = 5;
510 roce_info->ack_credits = iwdev->roce_ackcreds;
511 roce_info->ird_size = dev->hw_attrs.max_hw_ird;
512 roce_info->ord_size = dev->hw_attrs.max_hw_ord;
514 if (!iwqp->user_mode) {
515 roce_info->priv_mode_en = true;
516 roce_info->fast_reg_en = true;
517 roce_info->udprivcq_en = true;
519 roce_info->roce_tver = 0;
521 ctx_info->roce_info = &iwqp->roce_info;
522 ctx_info->udp_info = &iwqp->udp_info;
523 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
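/*
 * RoCEv2 defaults established above: a fixed local UDP source port
 * (0xc000), the IANA-assigned RoCEv2 destination port (ROCE_V2_UDP_DPORT,
 * 4791), and an MSS derived from the vsi MTU clamped to a valid IB path
 * MTU value.
 */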
527 irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
528 struct irdma_qp_host_ctx_info *ctx_info)
530 struct irdma_device *iwdev = iwqp->iwdev;
531 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
532 struct irdma_iwarp_offload_info *iwarp_info;
534 iwarp_info = &iwqp->iwarp_info;
535 ether_addr_copy(iwarp_info->mac_addr, IF_LLADDR(iwdev->netdev));
536 iwarp_info->rd_en = true;
537 iwarp_info->wr_rdresp_en = true;
538 iwarp_info->bind_en = true;
539 iwarp_info->ecn_en = true;
540 iwarp_info->rtomin = 5;
542 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
543 iwarp_info->ib_rd_en = true;
544 if (!iwqp->user_mode) {
545 iwarp_info->priv_mode_en = true;
546 iwarp_info->fast_reg_en = true;
548 iwarp_info->ddp_ver = 1;
549 iwarp_info->rdmap_ver = 1;
551 ctx_info->iwarp_info = &iwqp->iwarp_info;
552 ctx_info->iwarp_info_valid = true;
553 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
554 ctx_info->iwarp_info_valid = false;
558 irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
559 struct irdma_device *iwdev)
561 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
562 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
564 if (init_attr->create_flags)
567 if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
568 init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
569 init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
572 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
573 if (init_attr->qp_type != IB_QPT_RC &&
574 init_attr->qp_type != IB_QPT_UD &&
575 init_attr->qp_type != IB_QPT_GSI)
578 if (init_attr->qp_type != IB_QPT_RC)
586 irdma_flush_worker(struct work_struct *work)
588 struct delayed_work *dwork = to_delayed_work(work);
589 struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
592 spin_lock_irqsave(&iwqp->lock, flags); /* Don't allow more posting while generating completions */
593 irdma_generate_flush_completions(iwqp);
594 spin_unlock_irqrestore(&iwqp->lock, flags);
598 irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
602 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
603 if (iwqp->roce_info.wr_rdresp_en) {
604 acc_flags |= IB_ACCESS_LOCAL_WRITE;
605 acc_flags |= IB_ACCESS_REMOTE_WRITE;
607 if (iwqp->roce_info.rd_en)
608 acc_flags |= IB_ACCESS_REMOTE_READ;
609 if (iwqp->roce_info.bind_en)
610 acc_flags |= IB_ACCESS_MW_BIND;
612 if (iwqp->iwarp_info.wr_rdresp_en) {
613 acc_flags |= IB_ACCESS_LOCAL_WRITE;
614 acc_flags |= IB_ACCESS_REMOTE_WRITE;
616 if (iwqp->iwarp_info.rd_en)
617 acc_flags |= IB_ACCESS_REMOTE_READ;
618 if (iwqp->iwarp_info.bind_en)
619 acc_flags |= IB_ACCESS_MW_BIND;
625 * irdma_query_qp - query qp attributes
627 * @attr: attributes pointer
628 * @attr_mask: Not used
629 * @init_attr: qp attributes to return
632 irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
633 int attr_mask, struct ib_qp_init_attr *init_attr)
635 struct irdma_qp *iwqp = to_iwqp(ibqp);
636 struct irdma_sc_qp *qp = &iwqp->sc_qp;
638 memset(attr, 0, sizeof(*attr));
639 memset(init_attr, 0, sizeof(*init_attr));
641 attr->qp_state = iwqp->ibqp_state;
642 attr->cur_qp_state = iwqp->ibqp_state;
643 attr->cap.max_send_wr = iwqp->max_send_wr;
644 attr->cap.max_recv_wr = iwqp->max_recv_wr;
645 attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
646 attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
647 attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
648 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
650 if (rdma_protocol_roce(ibqp->device, 1)) {
651 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
652 attr->qkey = iwqp->roce_info.qkey;
653 attr->rq_psn = iwqp->udp_info.epsn;
654 attr->sq_psn = iwqp->udp_info.psn_nxt;
655 attr->dest_qp_num = iwqp->roce_info.dest_qp;
656 attr->pkey_index = iwqp->roce_info.p_key;
657 attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
658 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
659 attr->max_rd_atomic = iwqp->roce_info.ord_size;
660 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
663 init_attr->event_handler = iwqp->ibqp.event_handler;
664 init_attr->qp_context = iwqp->ibqp.qp_context;
665 init_attr->send_cq = iwqp->ibqp.send_cq;
666 init_attr->recv_cq = iwqp->ibqp.recv_cq;
667 init_attr->cap = attr->cap;
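/*
 * Illustrative consumer-side usage (libibverbs, not part of this driver).
 * Note that attr_mask is ignored here and all fields are always filled in:
 *
 *	struct ibv_qp_attr attr;
 *	struct ibv_qp_init_attr init_attr;
 *
 *	if (!ibv_query_qp(qp, &attr, IBV_QP_STATE | IBV_QP_CAP, &init_attr))
 *		printf("state=%d max_send_wr=%u\n",
 *		       attr.qp_state, attr.cap.max_send_wr);
 */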
673 * irdma_modify_qp_roce - modify qp request
674 * @ibqp: qp's pointer for modify
675 * @attr: access attributes
676 * @attr_mask: state mask
680 irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
681 int attr_mask, struct ib_udata *udata)
683 struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
684 struct irdma_qp *iwqp = to_iwqp(ibqp);
685 struct irdma_device *iwdev = iwqp->iwdev;
686 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
687 struct irdma_qp_host_ctx_info *ctx_info;
688 struct irdma_roce_offload_info *roce_info;
689 struct irdma_udp_offload_info *udp_info;
690 struct irdma_modify_qp_info info = {0};
691 struct irdma_modify_qp_resp uresp = {};
692 struct irdma_modify_qp_req ureq = {};
694 u8 issue_modify_qp = 0;
697 ctx_info = &iwqp->ctx_info;
698 roce_info = &iwqp->roce_info;
699 udp_info = &iwqp->udp_info;
701 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
704 if (attr_mask & IB_QP_DEST_QPN)
705 roce_info->dest_qp = attr->dest_qp_num;
707 if (attr_mask & IB_QP_PKEY_INDEX) {
708 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
714 if (attr_mask & IB_QP_QKEY)
715 roce_info->qkey = attr->qkey;
717 if (attr_mask & IB_QP_PATH_MTU)
718 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
720 if (attr_mask & IB_QP_SQ_PSN) {
721 udp_info->psn_nxt = attr->sq_psn;
722 udp_info->lsn = 0xffff;
723 udp_info->psn_una = attr->sq_psn;
724 udp_info->psn_max = attr->sq_psn;
727 if (attr_mask & IB_QP_RQ_PSN)
728 udp_info->epsn = attr->rq_psn;
730 if (attr_mask & IB_QP_RNR_RETRY)
731 udp_info->rnr_nak_thresh = attr->rnr_retry;
733 if (attr_mask & IB_QP_RETRY_CNT)
734 udp_info->rexmit_thresh = attr->retry_cnt;
736 ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
738 if (attr_mask & IB_QP_AV) {
739 struct irdma_av *av = &iwqp->roce_ah.av;
740 u16 vlan_id = VLAN_N_VID;
741 u32 local_ip[4] = {};
743 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
744 if (attr->ah_attr.ah_flags & IB_AH_GRH) {
745 udp_info->ttl = attr->ah_attr.grh.hop_limit;
746 udp_info->flow_label = attr->ah_attr.grh.flow_label;
747 udp_info->tos = attr->ah_attr.grh.traffic_class;
748 irdma_qp_rem_qos(&iwqp->sc_qp);
749 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
750 if (iwqp->sc_qp.vsi->dscp_mode)
752 ctx_info->user_pri = iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
753 else
754 ctx_info->user_pri = rt_tos2priority(udp_info->tos);
755 iwqp->sc_qp.user_pri = ctx_info->user_pri;
756 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
758 irdma_qp_add_qos(&iwqp->sc_qp);
760 ret = kc_irdma_set_roce_cm_info(iwqp, attr, &vlan_id);
764 if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
766 if (vlan_id < VLAN_N_VID) {
767 udp_info->insert_vlan_tag = true;
768 udp_info->vlan_tag = vlan_id |
769 ctx_info->user_pri << VLAN_PRIO_SHIFT;
771 udp_info->insert_vlan_tag = false;
774 av->attrs = attr->ah_attr;
775 rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
776 roce_info->local_qp = ibqp->qp_num;
777 if (av->sgid_addr.saddr.sa_family == AF_INET6) {
778 __be32 *daddr =
779 av->dgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
780 __be32 *saddr =
781 av->sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
783 irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
784 irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
786 udp_info->ipv4 = false;
787 irdma_copy_ip_ntohl(local_ip, daddr);
789 udp_info->arp_idx = irdma_arp_table(iwdev->rf, local_ip,
790 NULL, IRDMA_ARP_RESOLVE);
792 __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
793 __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
795 local_ip[0] = ntohl(daddr);
797 udp_info->ipv4 = true;
798 udp_info->dest_ip_addr[0] = 0;
799 udp_info->dest_ip_addr[1] = 0;
800 udp_info->dest_ip_addr[2] = 0;
801 udp_info->dest_ip_addr[3] = local_ip[0];
803 udp_info->local_ipaddr[0] = 0;
804 udp_info->local_ipaddr[1] = 0;
805 udp_info->local_ipaddr[2] = 0;
806 udp_info->local_ipaddr[3] = ntohl(saddr);
809 irdma_add_arp(iwdev->rf, local_ip,
810 ah_attr_to_dmac(attr->ah_attr));
813 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
814 if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
815 ibdev_err(&iwdev->ibdev,
816 "rd_atomic = %d, above max_hw_ord=%d\n",
818 dev->hw_attrs.max_hw_ord);
821 if (attr->max_rd_atomic)
822 roce_info->ord_size = attr->max_rd_atomic;
823 info.ord_valid = true;
826 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
827 if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
828 ibdev_err(&iwdev->ibdev,
829 "rd_atomic = %d, above max_hw_ird=%d\n",
831 dev->hw_attrs.max_hw_ird);
834 if (attr->max_dest_rd_atomic)
835 roce_info->ird_size = attr->max_dest_rd_atomic;
838 if (attr_mask & IB_QP_ACCESS_FLAGS) {
839 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
840 roce_info->wr_rdresp_en = true;
841 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
842 roce_info->wr_rdresp_en = true;
843 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
844 roce_info->rd_en = true;
847 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
849 irdma_debug(dev, IRDMA_DEBUG_VERBS,
850 "caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
851 __builtin_return_address(0), ibqp->qp_num, attr->qp_state, iwqp->ibqp_state,
852 iwqp->iwarp_state, attr_mask);
854 spin_lock_irqsave(&iwqp->lock, flags);
855 if (attr_mask & IB_QP_STATE) {
856 if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state, iwqp->ibqp.qp_type, attr_mask)) {
857 irdma_print("modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
858 iwqp->ibqp.qp_num, iwqp->ibqp_state,
863 info.curr_iwarp_state = iwqp->iwarp_state;
865 switch (attr->qp_state) {
867 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
872 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
873 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
878 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
882 info.arp_cache_idx_valid = true;
883 info.cq_num_valid = true;
884 info.next_iwarp_state = IRDMA_QP_STATE_RTR;
888 if (iwqp->ibqp_state < IB_QPS_RTR ||
889 iwqp->ibqp_state == IB_QPS_ERR) {
894 info.arp_cache_idx_valid = true;
895 info.cq_num_valid = true;
896 info.ord_valid = true;
897 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
899 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
900 iwdev->rf->check_fc(&iwdev->vsi, &iwqp->sc_qp);
901 udp_info->cwnd = iwdev->roce_cwnd;
902 roce_info->ack_credits = iwdev->roce_ackcreds;
903 if (iwdev->push_mode && udata &&
904 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
905 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
906 spin_unlock_irqrestore(&iwqp->lock, flags);
907 irdma_alloc_push_page(iwqp);
908 spin_lock_irqsave(&iwqp->lock, flags);
912 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
915 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
920 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
926 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
927 spin_unlock_irqrestore(&iwqp->lock, flags);
928 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
929 irdma_hw_modify_qp(iwdev, iwqp, &info, true);
930 spin_lock_irqsave(&iwqp->lock, flags);
933 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
934 spin_unlock_irqrestore(&iwqp->lock, flags);
936 if (ib_copy_from_udata(&ureq, udata,
937 min(sizeof(ureq), udata->inlen)))
940 irdma_flush_wqes(iwqp,
941 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
942 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
948 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
956 iwqp->ibqp_state = attr->qp_state;
959 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
960 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
961 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
962 spin_unlock_irqrestore(&iwqp->lock, flags);
964 if (attr_mask & IB_QP_STATE) {
965 if (issue_modify_qp) {
966 ctx_info->rem_endpoint_idx = udp_info->arp_idx;
967 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
969 spin_lock_irqsave(&iwqp->lock, flags);
970 if (iwqp->iwarp_state == info.curr_iwarp_state) {
971 iwqp->iwarp_state = info.next_iwarp_state;
972 iwqp->ibqp_state = attr->qp_state;
974 if (iwqp->ibqp_state > IB_QPS_RTS &&
975 !iwqp->flush_issued) {
976 iwqp->flush_issued = 1;
977 if (!iwqp->user_mode)
978 queue_delayed_work(iwqp->iwdev->cleanup_wq,
980 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
981 spin_unlock_irqrestore(&iwqp->lock, flags);
982 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
986 spin_unlock_irqrestore(&iwqp->lock, flags);
989 iwqp->ibqp_state = attr->qp_state;
991 if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
992 struct irdma_ucontext *ucontext;
994 ucontext = rdma_udata_to_drv_context(udata,
995 struct irdma_ucontext, ibucontext);
996 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
997 !iwqp->push_wqe_mmap_entry &&
998 !irdma_setup_push_mmap_entries(ucontext, iwqp,
999 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1000 uresp.push_valid = 1;
1001 uresp.push_offset = iwqp->sc_qp.push_offset;
1003 uresp.rd_fence_rate = iwdev->rd_fence_rate;
1004 ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1007 irdma_remove_push_mmap_entries(iwqp);
1008 irdma_debug(iwdev_to_idev(iwdev),
1010 "copy_to_udata failed\n");
1018 spin_unlock_irqrestore(&iwqp->lock, flags);
1024 * irdma_modify_qp - modify qp request
1025 * @ibqp: qp's pointer for modify
1026 * @attr: access attributes
1027 * @attr_mask: state mask
1031 irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1032 struct ib_udata *udata)
1034 struct irdma_qp *iwqp = to_iwqp(ibqp);
1035 struct irdma_device *iwdev = iwqp->iwdev;
1036 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1037 struct irdma_qp_host_ctx_info *ctx_info;
1038 struct irdma_tcp_offload_info *tcp_info;
1039 struct irdma_iwarp_offload_info *offload_info;
1040 struct irdma_modify_qp_info info = {0};
1041 struct irdma_modify_qp_resp uresp = {};
1042 struct irdma_modify_qp_req ureq = {};
1043 u8 issue_modify_qp = 0;
1046 unsigned long flags;
1048 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1051 ctx_info = &iwqp->ctx_info;
1052 offload_info = &iwqp->iwarp_info;
1053 tcp_info = &iwqp->tcp_info;
1054 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1055 irdma_debug(dev, IRDMA_DEBUG_VERBS,
1056 "caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
1057 __builtin_return_address(0), ibqp->qp_num, attr->qp_state, iwqp->ibqp_state, iwqp->iwarp_state,
1058 iwqp->last_aeq, iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1060 spin_lock_irqsave(&iwqp->lock, flags);
1061 if (attr_mask & IB_QP_STATE) {
1062 info.curr_iwarp_state = iwqp->iwarp_state;
1063 switch (attr->qp_state) {
1066 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1071 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1072 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1073 issue_modify_qp = 1;
1075 if (iwdev->push_mode && udata &&
1076 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1077 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1078 spin_unlock_irqrestore(&iwqp->lock, flags);
1079 irdma_alloc_push_page(iwqp);
1080 spin_lock_irqsave(&iwqp->lock, flags);
1084 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1090 issue_modify_qp = 1;
1091 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1092 iwqp->hte_added = 1;
1093 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1094 info.tcp_ctx_valid = true;
1095 info.ord_valid = true;
1096 info.arp_cache_idx_valid = true;
1097 info.cq_num_valid = true;
1100 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1105 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1106 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1111 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1116 info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
1117 issue_modify_qp = 1;
1120 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1128 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1129 spin_unlock_irqrestore(&iwqp->lock, flags);
1131 if (ib_copy_from_udata(&ureq, udata,
1132 min(sizeof(ureq), udata->inlen)))
1135 irdma_flush_wqes(iwqp,
1136 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1137 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1143 if (iwqp->sc_qp.term_flags) {
1144 spin_unlock_irqrestore(&iwqp->lock, flags);
1145 irdma_terminate_del_timer(&iwqp->sc_qp);
1146 spin_lock_irqsave(&iwqp->lock, flags);
1148 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1149 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1151 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1152 info.reset_tcp_conn = true;
1156 issue_modify_qp = 1;
1157 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1164 iwqp->ibqp_state = attr->qp_state;
1166 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1167 ctx_info->iwarp_info_valid = true;
1168 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1169 offload_info->wr_rdresp_en = true;
1170 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1171 offload_info->wr_rdresp_en = true;
1172 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1173 offload_info->rd_en = true;
1176 if (ctx_info->iwarp_info_valid) {
1177 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1178 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1179 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1181 spin_unlock_irqrestore(&iwqp->lock, flags);
1183 if (attr_mask & IB_QP_STATE) {
1184 if (issue_modify_qp) {
1185 ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1186 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1190 spin_lock_irqsave(&iwqp->lock, flags);
1191 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1192 iwqp->iwarp_state = info.next_iwarp_state;
1193 iwqp->ibqp_state = attr->qp_state;
1195 spin_unlock_irqrestore(&iwqp->lock, flags);
1198 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1200 if (iwqp->cm_id && iwqp->hw_tcp_state) {
1201 spin_lock_irqsave(&iwqp->lock, flags);
1202 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1203 iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1204 spin_unlock_irqrestore(&iwqp->lock, flags);
1205 irdma_cm_disconn(iwqp);
1208 int close_timer_started;
1210 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1212 if (iwqp->cm_node) {
1213 atomic_inc(&iwqp->cm_node->refcnt);
1214 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1215 close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1216 if (iwqp->cm_id && close_timer_started == 1)
1217 irdma_schedule_cm_timer(iwqp->cm_node,
1218 (struct irdma_puda_buf *)iwqp,
1219 IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1221 irdma_rem_ref_cm_node(iwqp->cm_node);
1223 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1227 if (attr_mask & IB_QP_STATE && udata &&
1228 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1229 struct irdma_ucontext *ucontext;
1231 ucontext = rdma_udata_to_drv_context(udata,
1232 struct irdma_ucontext, ibucontext);
1233 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1234 !iwqp->push_wqe_mmap_entry &&
1235 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1236 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1237 uresp.push_valid = 1;
1238 uresp.push_offset = iwqp->sc_qp.push_offset;
1240 uresp.rd_fence_rate = iwdev->rd_fence_rate;
1242 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1245 irdma_remove_push_mmap_entries(iwqp);
1246 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
1247 "copy_to_udata failed\n");
1254 spin_unlock_irqrestore(&iwqp->lock, flags);
1260 * irdma_cq_free_rsrc - free up resources for cq
1261 * @rf: RDMA PCI function
1265 irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1267 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1269 if (!iwcq->user_mode) {
1270 irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem);
1271 irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem_shadow);
1274 irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1278 * irdma_free_cqbuf - worker to free a cq buffer
1279 * @work: provides access to the cq buffer to free
1282 irdma_free_cqbuf(struct work_struct *work)
1284 struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1286 irdma_free_dma_mem(cq_buf->hw, &cq_buf->kmem_buf);
1291 * irdma_process_resize_list - remove resized cq buffers from the resize_list
1292 * @iwcq: cq which owns the resize_list
1293 * @iwdev: irdma device
1294 * @lcqe_buf: the buffer where the last cqe is received
1297 irdma_process_resize_list(struct irdma_cq *iwcq,
1298 struct irdma_device *iwdev,
1299 struct irdma_cq_buf *lcqe_buf)
1301 struct list_head *tmp_node, *list_node;
1302 struct irdma_cq_buf *cq_buf;
1305 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1306 cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1307 if (cq_buf == lcqe_buf)
1310 list_del(&cq_buf->list);
1311 queue_work(iwdev->cleanup_wq, &cq_buf->work);
1319 * irdma_resize_cq - resize cq
1320 * @ibcq: cq to be resized
1321 * @entries: desired cq size
1325 irdma_resize_cq(struct ib_cq *ibcq, int entries,
1326 struct ib_udata *udata)
1328 struct irdma_cq *iwcq = to_iwcq(ibcq);
1329 struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
1330 struct irdma_cqp_request *cqp_request;
1331 struct cqp_cmds_info *cqp_info;
1332 struct irdma_modify_cq_info *m_info;
1333 struct irdma_modify_cq_info info = {0};
1334 struct irdma_dma_mem kmem_buf;
1335 struct irdma_cq_mr *cqmr_buf;
1336 struct irdma_pbl *iwpbl_buf;
1337 struct irdma_device *iwdev;
1338 struct irdma_pci_f *rf;
1339 struct irdma_cq_buf *cq_buf = NULL;
1340 unsigned long flags;
1343 iwdev = to_iwdev(ibcq->device);
1346 if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1347 IRDMA_FEATURE_CQ_RESIZE))
1350 if (entries > rf->max_cqe)
1353 if (!iwcq->user_mode) {
1355 if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1359 info.cq_size = max(entries, 4);
1361 if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
1365 struct irdma_resize_cq_req req = {0};
1366 struct irdma_ucontext *ucontext =
1367 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
1370 /* CQ resize not supported with legacy GEN_1 libi40iw */
1371 if (ucontext->legacy_mode)
1374 if (ib_copy_from_udata(&req, udata,
1375 min(sizeof(req), udata->inlen)))
1378 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1379 iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
1380 &ucontext->cq_reg_mem_list);
1381 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1386 cqmr_buf = &iwpbl_buf->cq_mr;
1387 if (iwpbl_buf->pbl_allocated) {
1388 info.virtual_map = true;
1389 info.pbl_chunk_size = 1;
1390 info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
1392 info.cq_pa = cqmr_buf->cq_pbl.addr;
1395 /* Kmode CQ resize */
1398 rsize = info.cq_size * sizeof(struct irdma_cqe);
1399 kmem_buf.size = round_up(rsize, 256);
1400 kmem_buf.va = irdma_allocate_dma_mem(dev->hw, &kmem_buf,
1401 kmem_buf.size, 256);
1405 info.cq_base = kmem_buf.va;
1406 info.cq_pa = kmem_buf.pa;
1407 cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
1414 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1420 info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
1421 info.cq_resize = true;
1423 cqp_info = &cqp_request->info;
1424 m_info = &cqp_info->in.u.cq_modify.info;
1425 memcpy(m_info, &info, sizeof(*m_info));
1427 cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
1428 cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
1429 cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
1430 cqp_info->post_sq = 1;
1431 ret = irdma_handle_cqp_op(rf, cqp_request);
1432 irdma_put_cqp_request(&rf->cqp, cqp_request);
1436 spin_lock_irqsave(&iwcq->lock, flags);
1438 cq_buf->kmem_buf = iwcq->kmem;
1439 cq_buf->hw = dev->hw;
1440 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
1441 INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
1442 list_add_tail(&cq_buf->list, &iwcq->resize_list);
1443 iwcq->kmem = kmem_buf;
1446 irdma_sc_cq_resize(&iwcq->sc_cq, &info);
1447 ibcq->cqe = info.cq_size - 1;
1448 spin_unlock_irqrestore(&iwcq->lock, flags);
1453 irdma_free_dma_mem(dev->hw, &kmem_buf);
1460 * irdma_get_mr_access - get hw MR access permissions from IB access flags
1461 * @access: IB access flags
1463 static inline u16 irdma_get_mr_access(int access){
1466 hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
1467 IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
1468 hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
1469 IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
1470 hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
1471 IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
1472 hw_access |= (access & IB_ACCESS_MW_BIND) ?
1473 IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
1474 hw_access |= (access & IB_ZERO_BASED) ?
1475 IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
1476 hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
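/*
 * Worked example: access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ
 * yields IRDMA_ACCESS_FLAGS_LOCALWRITE | IRDMA_ACCESS_FLAGS_REMOTEREAD |
 * IRDMA_ACCESS_FLAGS_LOCALREAD, since local read is always granted.
 */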
1482 * irdma_free_stag - free stag resource
1483 * @iwdev: irdma device
1484 * @stag: stag to free
1487 irdma_free_stag(struct irdma_device *iwdev, u32 stag)
1491 stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
1492 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
1496 * irdma_create_stag - create random stag
1497 * @iwdev: irdma device
1500 irdma_create_stag(struct irdma_device *iwdev)
1504 u32 next_stag_index;
1510 get_random_bytes(&random, sizeof(random));
1511 consumer_key = (u8)random;
1513 driver_key = random & ~iwdev->rf->mr_stagmask;
1514 next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
1515 next_stag_index %= iwdev->rf->max_mr;
1517 ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
1518 iwdev->rf->max_mr, &stag_index,
1522 stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
1524 stag += (u32)consumer_key;
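/*
 * The resulting stag places the allocated MR resource index at
 * IRDMA_CQPSQ_STAG_IDX_S and the random 8-bit consumer key in the low
 * byte, with the remaining random bits forming the driver key.
 * irdma_free_stag() recovers the index by masking with mr_stagmask and
 * shifting back down.
 */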
1530 * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
1531 * @arr: lvl1 pbl array
1532 * @npages: page count
1533 * @pg_size: page size
1537 irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
1541 for (pg_idx = 0; pg_idx < npages; pg_idx++) {
1542 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
1550 * irdma_check_mr_contiguous - check if MR is physically contiguous
1551 * @palloc: pbl allocation struct
1552 * @pg_size: page size
1555 irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
1558 struct irdma_pble_level2 *lvl2 = &palloc->level2;
1559 struct irdma_pble_info *leaf = lvl2->leaf;
1561 u64 *start_addr = NULL;
1565 if (palloc->level == PBLE_LEVEL_1) {
1566 arr = palloc->level1.addr;
1567 ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
1572 start_addr = leaf->addr;
1574 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
1576 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
1578 ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
1587 * irdma_setup_pbles - copy user page addresses to pbles
1588 * @rf: RDMA PCI function
1589 * @iwmr: mr pointer for this memory registration
1590 * @use_pbles: flag whether to use pbles for this registration
1593 irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
1596 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1597 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1598 struct irdma_pble_info *pinfo;
1601 enum irdma_pble_level level = PBLE_LEVEL_1;
1604 status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
1609 iwpbl->pbl_allocated = true;
1610 level = palloc->level;
1611 pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
1612 palloc->level2.leaf;
1615 pbl = iwmr->pgaddrmem;
1618 irdma_copy_user_pgaddrs(iwmr, pbl, level);
1621 iwmr->pgaddrmem[0] = *pbl;
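/*
 * With use_pbles the user page addresses are copied into a level-1 or
 * level-2 PBLE allocation and the hardware is later given PBL indices;
 * without it the single physical address is kept in iwmr->pgaddrmem[0]
 * and programmed directly.
 */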
1627 * irdma_handle_q_mem - handle memory for qp and cq
1628 * @iwdev: irdma device
1629 * @req: information for q memory management
1630 * @iwpbl: pble struct
1631 * @use_pbles: flag to use pble
1634 irdma_handle_q_mem(struct irdma_device *iwdev,
1635 struct irdma_mem_reg_req *req,
1636 struct irdma_pbl *iwpbl, bool use_pbles)
1638 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1639 struct irdma_mr *iwmr = iwpbl->iwmr;
1640 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
1641 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
1642 struct irdma_hmc_pble *hmc_p;
1643 u64 *arr = iwmr->pgaddrmem;
1648 pg_size = iwmr->page_size;
1649 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
1653 if (use_pbles && palloc->level != PBLE_LEVEL_1) {
1654 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
1655 iwpbl->pbl_allocated = false;
1660 arr = palloc->level1.addr;
1662 switch (iwmr->type) {
1663 case IRDMA_MEMREG_TYPE_QP:
1664 total = req->sq_pages + req->rq_pages;
1665 hmc_p = &qpmr->sq_pbl;
1666 qpmr->shadow = (dma_addr_t) arr[total];
1668 ret = irdma_check_mem_contiguous(arr, req->sq_pages,
1671 ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
1677 hmc_p->idx = palloc->level1.idx;
1678 hmc_p = &qpmr->rq_pbl;
1679 hmc_p->idx = palloc->level1.idx + req->sq_pages;
1681 hmc_p->addr = arr[0];
1682 hmc_p = &qpmr->rq_pbl;
1683 hmc_p->addr = arr[req->sq_pages];
1686 case IRDMA_MEMREG_TYPE_CQ:
1687 hmc_p = &cqmr->cq_pbl;
1690 cqmr->shadow = (dma_addr_t) arr[req->cq_pages];
1693 ret = irdma_check_mem_contiguous(arr, req->cq_pages,
1697 hmc_p->idx = palloc->level1.idx;
1699 hmc_p->addr = arr[0];
1702 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
1707 if (use_pbles && ret) {
1708 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
1709 iwpbl->pbl_allocated = false;
1716 * irdma_hw_alloc_mw - create the hw memory window
1717 * @iwdev: irdma device
1718 * @iwmr: pointer to memory window info
1721 irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
1723 struct irdma_mw_alloc_info *info;
1724 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1725 struct irdma_cqp_request *cqp_request;
1726 struct cqp_cmds_info *cqp_info;
1729 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
1733 cqp_info = &cqp_request->info;
1734 info = &cqp_info->in.u.mw_alloc.info;
1735 memset(info, 0, sizeof(*info));
1736 if (iwmr->ibmw.type == IB_MW_TYPE_1)
1737 info->mw_wide = true;
1739 info->page_size = PAGE_SIZE;
1740 info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
1741 info->pd_id = iwpd->sc_pd.pd_id;
1742 info->remote_access = true;
1743 cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
1744 cqp_info->post_sq = 1;
1745 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
1746 cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
1747 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
1748 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
1754 * irdma_dealloc_mw - Dealloc memory window
1755 * @ibmw: memory window structure.
1758 irdma_dealloc_mw(struct ib_mw *ibmw)
1760 struct ib_pd *ibpd = ibmw->pd;
1761 struct irdma_pd *iwpd = to_iwpd(ibpd);
1762 struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
1763 struct irdma_device *iwdev = to_iwdev(ibmw->device);
1764 struct irdma_cqp_request *cqp_request;
1765 struct cqp_cmds_info *cqp_info;
1766 struct irdma_dealloc_stag_info *info;
1768 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
1772 cqp_info = &cqp_request->info;
1773 info = &cqp_info->in.u.dealloc_stag.info;
1774 memset(info, 0, sizeof(*info));
1775 info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
1776 info->stag_idx = RS_64_1(ibmw->rkey, IRDMA_CQPSQ_STAG_IDX_S);
1778 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
1779 cqp_info->post_sq = 1;
1780 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
1781 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
1782 irdma_handle_cqp_op(iwdev->rf, cqp_request);
1783 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
1784 irdma_free_stag(iwdev, iwmr->stag);
1791 * irdma_hw_alloc_stag - cqp command to allocate stag
1792 * @iwdev: irdma device
1793 * @iwmr: irdma mr pointer
1796 irdma_hw_alloc_stag(struct irdma_device *iwdev,
1797 struct irdma_mr *iwmr)
1799 struct irdma_allocate_stag_info *info;
1800 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1802 struct irdma_cqp_request *cqp_request;
1803 struct cqp_cmds_info *cqp_info;
1805 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
1809 cqp_info = &cqp_request->info;
1810 info = &cqp_info->in.u.alloc_stag.info;
1811 memset(info, 0, sizeof(*info));
1812 info->page_size = PAGE_SIZE;
1813 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
1814 info->pd_id = iwpd->sc_pd.pd_id;
1815 info->total_len = iwmr->len;
1816 info->remote_access = true;
1817 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
1818 cqp_info->post_sq = 1;
1819 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
1820 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
1821 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
1822 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
1828 * irdma_set_page - populate pbl list for fmr
1829 * @ibmr: ib mem to access iwarp mr pointer
1830 * @addr: page dma address for the pbl list
1833 irdma_set_page(struct ib_mr *ibmr, u64 addr)
1835 struct irdma_mr *iwmr = to_iwmr(ibmr);
1836 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1837 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1840 if (unlikely(iwmr->npages == iwmr->page_cnt))
1843 pbl = palloc->level1.addr;
1844 pbl[iwmr->npages++] = addr;
1850 * irdma_map_mr_sg - map an sg list for fmr
1851 * @ibmr: ib mem to access iwarp mr pointer
1852 * @sg: scatter gather list
1853 * @sg_nents: number of sg entries
1854 * @sg_offset: byte offset into the sg list
1857 irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
1858 int sg_nents, unsigned int *sg_offset)
1860 struct irdma_mr *iwmr = to_iwmr(ibmr);
1864 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
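/*
 * Typical fast-registration flow in a kernel consumer (sketch only, the
 * consumer-side variable names are illustrative): map the scatterlist into
 * the MR's PBL via ib_map_mr_sg(), which drives irdma_set_page() above,
 * then post an IB_WR_REG_MR work request handled in irdma_post_send():
 *
 *	const struct ib_send_wr *bad_wr;
 *	int n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
 *
 *	if (n == sg_nents) {
 *		struct ib_reg_wr wr = { .mr = mr, .key = mr->rkey,
 *					.access = IB_ACCESS_LOCAL_WRITE };
 *		wr.wr.opcode = IB_WR_REG_MR;
 *		ib_post_send(qp, &wr.wr, &bad_wr);
 *	}
 */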
1868 * irdma_hwreg_mr - send cqp command for memory registration
1869 * @iwdev: irdma device
1870 * @iwmr: irdma mr pointer
1871 * @access: access for MR
1874 irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
1877 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1878 struct irdma_reg_ns_stag_info *stag_info;
1879 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1880 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1881 struct irdma_cqp_request *cqp_request;
1882 struct cqp_cmds_info *cqp_info;
1885 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
1889 cqp_info = &cqp_request->info;
1890 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
1891 memset(stag_info, 0, sizeof(*stag_info));
1892 stag_info->va = iwpbl->user_base;
1893 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
1894 stag_info->stag_key = (u8)iwmr->stag;
1895 stag_info->total_len = iwmr->len;
1896 stag_info->access_rights = irdma_get_mr_access(access);
1897 stag_info->pd_id = iwpd->sc_pd.pd_id;
1898 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
1899 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
1901 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
1902 stag_info->page_size = iwmr->page_size;
1904 if (iwpbl->pbl_allocated) {
1905 if (palloc->level == PBLE_LEVEL_1) {
1906 stag_info->first_pm_pbl_index = palloc->level1.idx;
1907 stag_info->chunk_size = 1;
1909 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
1910 stag_info->chunk_size = 3;
1913 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
1916 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
1917 cqp_info->post_sq = 1;
1918 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
1919 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
1920 ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
1921 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
1927 * irdma_reg_user_mr - Register a user memory region
1929 * @start: virtual start address
1930 * @len: length of mr
1931 * @virt: virtual address
1932 * @access: access of mr
1935 static struct ib_mr *
1936 irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
1937 u64 virt, int access,
1938 struct ib_udata *udata)
1940 struct irdma_device *iwdev = to_iwdev(pd->device);
1941 struct irdma_ucontext *ucontext;
1942 struct irdma_pble_alloc *palloc;
1943 struct irdma_pbl *iwpbl;
1944 struct irdma_mr *iwmr;
1945 struct ib_umem *region;
1946 struct irdma_mem_reg_req req;
1947 u32 total, stag = 0;
1948 u8 shadow_pgcnt = 1;
1949 bool use_pbles = false;
1950 unsigned long flags;
1954 if (!len || len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
1955 return ERR_PTR(-EINVAL);
1957 region = ib_umem_get(pd->uobject->context, start, len, access, 0);
1959 if (IS_ERR(region)) {
1960 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
1961 "Failed to create ib_umem region\n");
1962 return (struct ib_mr *)region;
1965 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
1966 ib_umem_release(region);
1967 return ERR_PTR(-EFAULT);
1970 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1972 ib_umem_release(region);
1973 return ERR_PTR(-ENOMEM);
1976 iwpbl = &iwmr->iwpbl;
1978 iwmr->region = region;
1980 iwmr->ibmr.device = pd->device;
1981 iwmr->ibmr.iova = virt;
1982 iwmr->page_size = PAGE_SIZE;
1984 iwmr->page_msk = PAGE_MASK;
1985 iwmr->len = region->length;
1986 iwpbl->user_base = virt;
1987 palloc = &iwpbl->pble_alloc;
1988 iwmr->type = req.reg_type;
1989 iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size, virt);
1991 switch (req.reg_type) {
1992 case IRDMA_MEMREG_TYPE_QP:
1993 total = req.sq_pages + req.rq_pages + shadow_pgcnt;
1994 if (total > iwmr->page_cnt) {
1998 total = req.sq_pages + req.rq_pages;
1999 use_pbles = (total > 2);
2000 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2004 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2006 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2007 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
2008 iwpbl->on_list = true;
2009 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2011 case IRDMA_MEMREG_TYPE_CQ:
2012 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
2014 total = req.cq_pages + shadow_pgcnt;
2015 if (total > iwmr->page_cnt) {
2020 use_pbles = (req.cq_pages > 1);
2021 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2025 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2027 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2028 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
2029 iwpbl->on_list = true;
2030 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2032 case IRDMA_MEMREG_TYPE_MEM:
2033 use_pbles = (iwmr->page_cnt != 1);
2035 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
2040 ret = irdma_check_mr_contiguous(palloc,
2043 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2044 iwpbl->pbl_allocated = false;
2048 stag = irdma_create_stag(iwdev);
2055 iwmr->ibmr.rkey = stag;
2056 iwmr->ibmr.lkey = stag;
2057 err = irdma_hwreg_mr(iwdev, iwmr, access);
2059 irdma_free_stag(iwdev, stag);
2068 iwmr->type = req.reg_type;
2073 if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
2074 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2075 ib_umem_release(region);
2078 return ERR_PTR(err);
2082 * irdma_reg_phys_mr - register kernel physical memory
2084 * @addr: physical address of memory to register
2085 * @size: size of memory to register
2086 * @access: Access rights
2087 * @iova_start: start of virtual address for physical buffers
2090 irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
2093 struct irdma_device *iwdev = to_iwdev(pd->device);
2094 struct irdma_pbl *iwpbl;
2095 struct irdma_mr *iwmr;
2099 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2101 return ERR_PTR(-ENOMEM);
2104 iwmr->ibmr.device = pd->device;
2105 iwpbl = &iwmr->iwpbl;
2107 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2108 iwpbl->user_base = *iova_start;
2109 stag = irdma_create_stag(iwdev);
2116 iwmr->ibmr.iova = *iova_start;
2117 iwmr->ibmr.rkey = stag;
2118 iwmr->ibmr.lkey = stag;
2120 iwmr->pgaddrmem[0] = addr;
2122 iwmr->page_size = SZ_4K;
2123 ret = irdma_hwreg_mr(iwdev, iwmr, access);
2125 irdma_free_stag(iwdev, stag);
2134 return ERR_PTR(ret);
2138 * irdma_get_dma_mr - register physical mem
2140 * @acc: access for memory
2142 static struct ib_mr *
2143 irdma_get_dma_mr(struct ib_pd *pd, int acc)
2147 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
2151 * irdma_del_memlist - Deleting pbl list entries for CQ/QP
2152 * @iwmr: iwmr for IB's user page addresses
2153 * @ucontext: ptr to user context
2156 irdma_del_memlist(struct irdma_mr *iwmr,
2157 struct irdma_ucontext *ucontext)
2159 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2160 unsigned long flags;
2162 switch (iwmr->type) {
2163 case IRDMA_MEMREG_TYPE_CQ:
2164 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2165 if (iwpbl->on_list) {
2166 iwpbl->on_list = false;
2167 list_del(&iwpbl->list);
2169 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2171 case IRDMA_MEMREG_TYPE_QP:
2172 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2173 if (iwpbl->on_list) {
2174 iwpbl->on_list = false;
2175 list_del(&iwpbl->list);
2177 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2185 * irdma_copy_sg_list - copy sg list for qp
2186 * @sg_list: copied into sg_list
2187 * @sgl: copy from sgl
2188 * @num_sges: count of sg entries
2191 irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
2196 for (i = 0; i < num_sges; i++) {
2197 sg_list[i].tag_off = sgl[i].addr;
2198 sg_list[i].len = sgl[i].length;
2199 sg_list[i].stag = sgl[i].lkey;
2204 * irdma_post_send - kernel application wr
2205 * @ibqp: qp ptr for wr
2206 * @ib_wr: work request ptr
2207 * @bad_wr: return of bad wr if err
2210 irdma_post_send(struct ib_qp *ibqp,
2211 const struct ib_send_wr *ib_wr,
2212 const struct ib_send_wr **bad_wr)
2214 struct irdma_qp *iwqp;
2215 struct irdma_qp_uk *ukqp;
2216 struct irdma_sc_dev *dev;
2217 struct irdma_post_sq_info info;
2219 unsigned long flags;
2221 struct irdma_ah *ah;
2223 iwqp = to_iwqp(ibqp);
2224 ukqp = &iwqp->sc_qp.qp_uk;
2225 dev = &iwqp->iwdev->rf->sc_dev;
2227 spin_lock_irqsave(&iwqp->lock, flags);
2229 memset(&info, 0, sizeof(info));
2231 info.wr_id = (ib_wr->wr_id);
2232 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
2233 info.signaled = true;
2234 if (ib_wr->send_flags & IB_SEND_FENCE)
2235 info.read_fence = true;
2236 switch (ib_wr->opcode) {
2237 case IB_WR_SEND_WITH_IMM:
2238 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
2239 info.imm_data_valid = true;
2240 info.imm_data = ntohl(ib_wr->ex.imm_data);
2247 case IB_WR_SEND_WITH_INV:
2248 if (ib_wr->opcode == IB_WR_SEND ||
2249 ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
2250 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2251 info.op_type = IRDMA_OP_TYPE_SEND_SOL;
2253 info.op_type = IRDMA_OP_TYPE_SEND;
2255 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2256 info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
2258 info.op_type = IRDMA_OP_TYPE_SEND_INV;
2259 info.stag_to_inv = ib_wr->ex.invalidate_rkey;
2262 if (ib_wr->send_flags & IB_SEND_INLINE) {
2263 info.op.inline_send.data = (void *)(unsigned long)
2264 ib_wr->sg_list[0].addr;
2265 info.op.inline_send.len = ib_wr->sg_list[0].length;
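/* UD/GSI QPs carry the destination in the WR's address handle: copy the AH index, qkey and remote QPN into the WQE info. */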
2266 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
2267 iwqp->ibqp.qp_type == IB_QPT_GSI) {
2268 ah = to_iwah(ud_wr(ib_wr)->ah);
2269 info.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx;
2270 info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;
2271 info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;
2273 err = irdma_uk_inline_send(ukqp, &info, false);
2275 info.op.send.num_sges = ib_wr->num_sge;
2276 info.op.send.sg_list = (struct irdma_sge *)
2278 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
2279 iwqp->ibqp.qp_type == IB_QPT_GSI) {
2280 ah = to_iwah(ud_wr(ib_wr)->ah);
2281 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
2282 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
2283 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
2285 err = irdma_uk_send(ukqp, &info, false);
2288 case IB_WR_RDMA_WRITE_WITH_IMM:
2289 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
2290 info.imm_data_valid = true;
2291 info.imm_data = ntohl(ib_wr->ex.imm_data);
2297 case IB_WR_RDMA_WRITE:
2298 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2299 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
2301 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
2303 if (ib_wr->send_flags & IB_SEND_INLINE) {
2304 info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
2305 info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
2306 info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2307 info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2308 err = irdma_uk_inline_rdma_write(ukqp, &info, false);
2310 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
2311 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
2312 info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2313 info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2314 err = irdma_uk_rdma_write(ukqp, &info, false);
2317 case IB_WR_RDMA_READ_WITH_INV:
2318 inv_stag = true;
2319 /* fallthrough */
2320 case IB_WR_RDMA_READ:
2321 if (ib_wr->num_sge >
2322 dev->hw_attrs.uk_attrs.max_hw_read_sges) {
2326 info.op_type = IRDMA_OP_TYPE_RDMA_READ;
2327 info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2328 info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2329 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
2330 info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
2331 err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
2333 case IB_WR_LOCAL_INV:
2334 info.op_type = IRDMA_OP_TYPE_INV_STAG;
2335 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
2336 err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
2339 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
2340 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
2341 struct irdma_fast_reg_stag_info stag_info = {0};
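/* Build a fast-register WQE from the MR's PBLE allocation: the rkey is split into an 8-bit key and a stag index, and the HW is pointed at the level-1 PBL. */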
2343 stag_info.signaled = info.signaled;
2344 stag_info.read_fence = info.read_fence;
2345 stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
2346 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
2347 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
2348 stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
2349 stag_info.wr_id = ib_wr->wr_id;
2350 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2351 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
2352 stag_info.total_len = iwmr->ibmr.length;
2353 stag_info.reg_addr_pa = *palloc->level1.addr;
2354 stag_info.first_pm_pbl_index = palloc->level1.idx;
2355 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
2356 if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
2357 stag_info.chunk_size = 1;
2358 err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
2364 irdma_debug(iwdev_to_idev(iwqp->iwdev),
2366 "upost_send bad opcode = 0x%x\n",
2373 ib_wr = ib_wr->next;
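/* Ring the SQ doorbell only while the QP can still process WRs (state <= RTS, no flush issued); if a flush is pending, kick the flush worker so the posted WRs complete in error. */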
2376 if (!iwqp->flush_issued && iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
2377 irdma_uk_qp_post_wr(ukqp);
2378 else if (iwqp->flush_issued)
2379 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, IRDMA_FLUSH_DELAY_MS);
2380 spin_unlock_irqrestore(&iwqp->lock, flags);
2388 * irdma_post_recv - post receive wr for kernel application
2389 * @ibqp: ib qp pointer
2390 * @ib_wr: work request for receive
2391 * @bad_wr: returns pointer to the wr that caused an error
2394 irdma_post_recv(struct ib_qp *ibqp,
2395 const struct ib_recv_wr *ib_wr,
2396 const struct ib_recv_wr **bad_wr)
2398 struct irdma_qp *iwqp = to_iwqp(ibqp);
2399 struct irdma_qp_uk *ukqp = &iwqp->sc_qp.qp_uk;
2400 struct irdma_post_rq_info post_recv = {0};
2401 struct irdma_sge *sg_list = iwqp->sg_list;
2402 unsigned long flags;
2405 spin_lock_irqsave(&iwqp->lock, flags);
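/* Walk the chained receive WRs under the QP lock; reject any WR whose SGE count exceeds the RQ fragment limit. */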
2408 if (ib_wr->num_sge > ukqp->max_rq_frag_cnt) {
2412 post_recv.num_sges = ib_wr->num_sge;
2413 post_recv.wr_id = ib_wr->wr_id;
2414 irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
2415 post_recv.sg_list = sg_list;
2416 err = irdma_uk_post_receive(ukqp, &post_recv);
2418 irdma_debug(iwdev_to_idev(iwqp->iwdev),
2419 IRDMA_DEBUG_VERBS, "post_recv err %d\n",
2424 ib_wr = ib_wr->next;
2428 if (iwqp->flush_issued)
2429 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, IRDMA_FLUSH_DELAY_MS);
2430 spin_unlock_irqrestore(&iwqp->lock, flags);
2438 * irdma_flush_err_to_ib_wc_status - map a flush error code to an IB wc status
2439 * @opcode: iwarp flush code
2441 static enum ib_wc_status
2442 irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
2445 case FLUSH_PROT_ERR:
2446 return IB_WC_LOC_PROT_ERR;
2447 case FLUSH_REM_ACCESS_ERR:
2448 return IB_WC_REM_ACCESS_ERR;
2449 case FLUSH_LOC_QP_OP_ERR:
2450 return IB_WC_LOC_QP_OP_ERR;
2451 case FLUSH_REM_OP_ERR:
2452 return IB_WC_REM_OP_ERR;
2453 case FLUSH_LOC_LEN_ERR:
2454 return IB_WC_LOC_LEN_ERR;
2455 case FLUSH_GENERAL_ERR:
2456 return IB_WC_WR_FLUSH_ERR;
2457 case FLUSH_MW_BIND_ERR:
2458 return IB_WC_MW_BIND_ERR;
2459 case FLUSH_RETRY_EXC_ERR:
2460 return IB_WC_RETRY_EXC_ERR;
2461 case FLUSH_FATAL_ERR:
2462 default:
2463 return IB_WC_FATAL_ERR;
2468 * irdma_process_cqe - process cqe info
2469 * @entry: processed cqe
2470 * @cq_poll_info: cqe info
2473 irdma_process_cqe(struct ib_wc *entry,
2474 struct irdma_cq_poll_info *cq_poll_info)
2476 struct irdma_sc_qp *qp;
2478 entry->wc_flags = 0;
2479 entry->pkey_index = 0;
2480 entry->wr_id = cq_poll_info->wr_id;
2482 qp = cq_poll_info->qp_handle;
2483 entry->qp = qp->qp_uk.back_qp;
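/* Flushed completions are mapped to specific IB flush statuses; any other error is reported as IB_WC_GENERAL_ERR with the raw major/minor codes packed into vendor_err. */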
2485 if (cq_poll_info->error) {
2486 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
2487 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
2489 entry->vendor_err = cq_poll_info->major_err << 16 |
2490 cq_poll_info->minor_err;
2492 entry->status = IB_WC_SUCCESS;
2493 if (cq_poll_info->imm_valid) {
2494 entry->ex.imm_data = htonl(cq_poll_info->imm_data);
2495 entry->wc_flags |= IB_WC_WITH_IMM;
2497 if (cq_poll_info->ud_smac_valid) {
2498 ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
2499 entry->wc_flags |= IB_WC_WITH_SMAC;
2502 if (cq_poll_info->ud_vlan_valid) {
2503 u16 vlan = cq_poll_info->ud_vlan & EVL_VLID_MASK;
2505 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
2507 entry->vlan_id = vlan;
2508 entry->wc_flags |= IB_WC_WITH_VLAN;
2515 switch (cq_poll_info->op_type) {
2516 case IRDMA_OP_TYPE_RDMA_WRITE:
2517 case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
2518 entry->opcode = IB_WC_RDMA_WRITE;
2520 case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
2521 case IRDMA_OP_TYPE_RDMA_READ:
2522 entry->opcode = IB_WC_RDMA_READ;
2524 case IRDMA_OP_TYPE_SEND_INV:
2525 case IRDMA_OP_TYPE_SEND_SOL:
2526 case IRDMA_OP_TYPE_SEND_SOL_INV:
2527 case IRDMA_OP_TYPE_SEND:
2528 entry->opcode = IB_WC_SEND;
2530 case IRDMA_OP_TYPE_FAST_REG_NSMR:
2531 entry->opcode = IB_WC_REG_MR;
2533 case IRDMA_OP_TYPE_INV_STAG:
2534 entry->opcode = IB_WC_LOCAL_INV;
2536 case IRDMA_OP_TYPE_REC_IMM:
2537 case IRDMA_OP_TYPE_REC:
2538 entry->opcode = cq_poll_info->op_type == IRDMA_OP_TYPE_REC_IMM ?
2539 IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
2540 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
2541 cq_poll_info->stag_invalid_set) {
2542 entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
2543 entry->wc_flags |= IB_WC_WITH_INVALIDATE;
2547 ibdev_err(entry->qp->device,
2548 "Invalid opcode = %d in CQE\n", cq_poll_info->op_type);
2549 entry->status = IB_WC_GENERAL_ERR;
2553 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
2554 entry->src_qp = cq_poll_info->ud_src_qpn;
2557 (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
2558 entry->network_hdr_type = cq_poll_info->ipv4 ?
2562 entry->src_qp = cq_poll_info->qp_id;
2565 entry->byte_len = cq_poll_info->bytes_xfered;
2569 * irdma_poll_one - poll one entry of the CQ
2570 * @ukcq: ukcq to poll
2571 * @cur_cqe: current CQE info to be filled in
2572 * @entry: ib_wc object to be filled for non-extended CQ or NULL for extended CQ
2574 * Returns the internal irdma device error code or 0 on success
2577 irdma_poll_one(struct irdma_cq_uk *ukcq,
2578 struct irdma_cq_poll_info *cur_cqe,
2579 struct ib_wc *entry)
2581 int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
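/* A nonzero poll result (e.g. -ENOENT for an empty CQ, -EFAULT for a destroyed QP) is returned to the caller unchanged. */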
2586 irdma_process_cqe(entry, cur_cqe);
2592 * __irdma_poll_cq - poll cq for completion (kernel apps)
2594 * @num_entries: number of entries to poll
2595 * @entry: ib_wc array to be filled with completed entries
2598 __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
2600 struct list_head *tmp_node, *list_node;
2601 struct irdma_cq_buf *last_buf = NULL;
2602 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
2603 struct irdma_cq_buf *cq_buf;
2605 struct irdma_device *iwdev;
2606 struct irdma_cq_uk *ukcq;
2607 bool cq_new_cqe = false;
2608 int resized_bufs = 0;
2611 iwdev = to_iwdev(iwcq->ibcq.device);
2612 ukcq = &iwcq->sc_cq.cq_uk;
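/* CQ buffers left over from earlier resizes may still hold unreported completions, so they are drained before the live CQ buffer is polled. */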
2614 /* go through the list of previously resized CQ buffers */
2615 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
2616 cq_buf = container_of(list_node, struct irdma_cq_buf, list);
2617 while (npolled < num_entries) {
2618 ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
2626 /* QP using the CQ is destroyed. Skip reporting this CQE */
2627 if (ret == -EFAULT) {
2634 /* save the resized CQ buffer which received the last cqe */
2640 /* check the current CQ for new cqes */
2641 while (npolled < num_entries) {
2642 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
2643 if (ret == -ENOENT) {
2644 ret = irdma_generated_cmpls(iwcq, cur_cqe);
2646 irdma_process_cqe(entry + npolled, cur_cqe);
2656 /* QP using the CQ is destroyed. Skip reporting this CQE */
2657 if (ret == -EFAULT) {
2665 /* all previous CQ resizes are complete */
2666 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
2668 /* only CQ resizes up to the last_buf are complete */
2669 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
2671 /* report to the HW the number of complete CQ resizes */
2672 irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
2676 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
2677 "%s: Error polling CQ, irdma_err: %d\n", __func__, ret);
2683 * irdma_poll_cq - poll cq for completion (kernel apps)
2685 * @num_entries: number of entries to poll
2686 * @entry: ib_wc array to be filled with completed entries
2689 irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
2690 struct ib_wc *entry)
2692 struct irdma_cq *iwcq;
2693 unsigned long flags;
2696 iwcq = to_iwcq(ibcq);
2698 spin_lock_irqsave(&iwcq->lock, flags);
2699 ret = __irdma_poll_cq(iwcq, num_entries, entry);
2700 spin_unlock_irqrestore(&iwcq->lock, flags);
2706 * irdma_req_notify_cq - arm cq for a kernel application
2708 * @notify_flags: notification flags
2711 irdma_req_notify_cq(struct ib_cq *ibcq,
2712 enum ib_cq_notify_flags notify_flags)
2714 struct irdma_cq *iwcq;
2715 struct irdma_cq_uk *ukcq;
2716 unsigned long flags;
2717 enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
2718 bool promo_event = false;
2721 iwcq = to_iwcq(ibcq);
2722 ukcq = &iwcq->sc_cq.cq_uk;
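/* Re-arm the CQ only if it is not already armed, or to promote a previous SOLICITED arm to an unsolicited (next-completion) arm. */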
2724 spin_lock_irqsave(&iwcq->lock, flags);
2725 if (notify_flags == IB_CQ_SOLICITED) {
2726 cq_notify = IRDMA_CQ_COMPL_SOLICITED;
2728 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED)
2732 if (!iwcq->armed || promo_event) {
2734 iwcq->last_notify = cq_notify;
2735 irdma_uk_cq_request_notification(ukcq, cq_notify);
2738 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
2740 spin_unlock_irqrestore(&iwcq->lock, flags);
2745 const char *const irdma_hw_stat_names[] = {
2747 [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
2748 [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
2749 [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
2750 [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
2751 [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
2752 [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
2753 [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
2754 [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
2755 [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
2756 [IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
2758 [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = "ip4InOctets",
2759 [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = "ip4InPkts",
2760 [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = "ip4InReasmRqd",
2761 [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = "ip4InMcastPkts",
2762 [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = "ip4OutOctets",
2763 [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = "ip4OutPkts",
2764 [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = "ip4OutSegRqd",
2765 [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = "ip4OutMcastPkts",
2766 [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = "ip6InOctets",
2767 [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = "ip6InPkts",
2768 [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = "ip6InReasmRqd",
2769 [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = "ip6InMcastPkts",
2770 [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = "ip6OutOctets",
2771 [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = "ip6OutPkts",
2772 [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = "ip6OutSegRqd",
2773 [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = "ip6OutMcastPkts",
2774 [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = "tcpInSegs",
2775 [IRDMA_HW_STAT_INDEX_TCPTXSEG] = "tcpOutSegs",
2776 [IRDMA_HW_STAT_INDEX_RDMARXRDS] = "iwInRdmaReads",
2777 [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = "iwInRdmaSends",
2778 [IRDMA_HW_STAT_INDEX_RDMARXWRS] = "iwInRdmaWrites",
2779 [IRDMA_HW_STAT_INDEX_RDMATXRDS] = "iwOutRdmaReads",
2780 [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = "iwOutRdmaSends",
2781 [IRDMA_HW_STAT_INDEX_RDMATXWRS] = "iwOutRdmaWrites",
2782 [IRDMA_HW_STAT_INDEX_RDMAVBND] = "iwRdmaBnd",
2783 [IRDMA_HW_STAT_INDEX_RDMAVINV] = "iwRdmaInv",
2786 [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
2787 [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
2788 [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
2790 [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = "ip4InMcastOctets",
2791 [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = "ip4OutMcastOctets",
2792 [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = "ip6InMcastOctets",
2793 [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = "ip6OutMcastOctets",
2794 [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = "RxUDP",
2795 [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = "TxUDP",
2796 [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = "RxECNMrkd",
2800 * mcast_list_add - Add a new mcast item to list
2801 * @rf: RDMA PCI function
2802 * @new_elem: pointer to element to add
2805 mcast_list_add(struct irdma_pci_f *rf,
2806 struct mc_table_list *new_elem)
2808 list_add(&new_elem->list, &rf->mc_qht_list.list);
2812 * mcast_list_del - Remove an mcast item from list
2813 * @mc_qht_elem: pointer to mcast table list element
2816 mcast_list_del(struct mc_table_list *mc_qht_elem)
2819 list_del(&mc_qht_elem->list);
2823 * mcast_list_lookup_ip - Search mcast list for address
2824 * @rf: RDMA PCI function
2825 * @ip_mcast: pointer to mcast IP address
2827 static struct mc_table_list *
2828 mcast_list_lookup_ip(struct irdma_pci_f *rf,
2831 struct mc_table_list *mc_qht_el;
2832 struct list_head *pos, *q;
2834 list_for_each_safe(pos, q, &rf->mc_qht_list.list) {
2835 mc_qht_el = list_entry(pos, struct mc_table_list, list);
2836 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
2837 sizeof(mc_qht_el->mc_info.dest_ip)))
2845 * irdma_mcast_cqp_op - perform a mcast cqp operation
2846 * @iwdev: irdma device
2847 * @mc_grp_ctx: mcast group info
2850 * returns error status
2853 irdma_mcast_cqp_op(struct irdma_device *iwdev,
2854 struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
2856 struct cqp_cmds_info *cqp_info;
2857 struct irdma_cqp_request *cqp_request;
2860 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
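/* The group context is passed through the mc_create member of the CQP info union for create, modify and destroy alike; @op selects the actual CQP command. */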
2864 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
2865 cqp_info = &cqp_request->info;
2866 cqp_info->cqp_cmd = op;
2867 cqp_info->post_sq = 1;
2868 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
2869 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
2870 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2871 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2877 * irdma_attach_mcast - attach a qp to a multicast group
2879 * @ibgid: pointer to global ID
2882 * returns error status
2885 irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
2887 struct irdma_qp *iwqp = to_iwqp(ibqp);
2888 struct irdma_device *iwdev = iwqp->iwdev;
2889 struct irdma_pci_f *rf = iwdev->rf;
2890 struct mc_table_list *mc_qht_elem;
2891 struct irdma_mcast_grp_ctx_entry_info mcg_info = {0};
2892 unsigned long flags;
2893 u32 ip_addr[4] = {0};
2899 union {
2900 struct sockaddr saddr;
2901 struct sockaddr_in saddr_in;
2902 struct sockaddr_in6 saddr_in6;
2903 } sgid_addr;
2904 unsigned char dmac[ETH_ALEN];
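/* The multicast GID encodes either an IPv4-mapped or a native IPv6 group address: recover the IP, look up the VLAN and derive the multicast MAC from it. */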
2906 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
2908 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
2909 irdma_copy_ip_ntohl(ip_addr,
2910 sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
2911 irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
2913 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
2914 "qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
2916 irdma_mcast_mac_v6(ip_addr, dmac);
2918 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
2920 vlan_id = irdma_get_vlan_ipv4(ip_addr);
2921 irdma_mcast_mac_v4(ip_addr, dmac);
2922 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
2923 "qp_id=%d, IP4address=%pI4, MAC=%pM\n",
2924 ibqp->qp_num, ip_addr, dmac);
2927 spin_lock_irqsave(&rf->qh_list_lock, flags);
2928 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
2930 struct irdma_dma_mem *dma_mem_mc;
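/* First attach for this group address: allocate a multicast group number and the DMA'd member-list context, then add the element to the hash list. */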
2932 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
2933 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
2937 mc_qht_elem->mc_info.ipv4_valid = ipv4;
2938 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
2939 sizeof(mc_qht_elem->mc_info.dest_ip));
2940 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
2941 &mgn, &rf->next_mcg);
2947 mc_qht_elem->mc_info.mgn = mgn;
2948 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
2949 dma_mem_mc->size = sizeof(u64) * IRDMA_MAX_MGS_PER_CTX;
2950 dma_mem_mc->va = irdma_allocate_dma_mem(&rf->hw, dma_mem_mc,
2952 IRDMA_HW_PAGE_SIZE);
2953 if (!dma_mem_mc->va) {
2954 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
2959 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
2960 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
2961 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
2962 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
2963 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
2964 if (vlan_id < VLAN_N_VID)
2965 mc_qht_elem->mc_grp_ctx.vlan_valid = true;
2966 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->vsi.fcn_id;
2967 mc_qht_elem->mc_grp_ctx.qs_handle =
2968 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
2969 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
2971 spin_lock_irqsave(&rf->qh_list_lock, flags);
2972 mcast_list_add(rf, mc_qht_elem);
2974 if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
2975 IRDMA_MAX_MGS_PER_CTX) {
2976 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
2981 mcg_info.qp_id = iwqp->ibqp.qp_num;
2982 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
2983 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
2984 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
2986 /* Only if there is a change do we need to modify or create */
2988 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
2989 IRDMA_OP_MC_CREATE);
2990 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
2991 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
2992 IRDMA_OP_MC_MODIFY);
3003 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
3004 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
3005 mcast_list_del(mc_qht_elem);
3006 irdma_free_dma_mem(&rf->hw,
3007 &mc_qht_elem->mc_grp_ctx.dma_mem_mc);
3008 irdma_free_rsrc(rf, rf->allocated_mcgs,
3009 mc_qht_elem->mc_grp_ctx.mg_id);
3017 * irdma_detach_mcast - detach a qp from a multicast group
3019 * @ibgid: pointer to global ID
3022 * returns error status
3025 irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
3027 struct irdma_qp *iwqp = to_iwqp(ibqp);
3028 struct irdma_device *iwdev = iwqp->iwdev;
3029 struct irdma_pci_f *rf = iwdev->rf;
3030 u32 ip_addr[4] = {0};
3031 struct mc_table_list *mc_qht_elem;
3032 struct irdma_mcast_grp_ctx_entry_info mcg_info = {0};
3034 unsigned long flags;
3035 union {
3036 struct sockaddr saddr;
3037 struct sockaddr_in saddr_in;
3038 struct sockaddr_in6 saddr_in6;
3039 } sgid_addr;
3041 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
3042 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
3043 irdma_copy_ip_ntohl(ip_addr,
3044 sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
3046 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
3048 spin_lock_irqsave(&rf->qh_list_lock, flags);
3049 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
3051 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3052 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3053 "address not found MCG\n");
3057 mcg_info.qp_id = iwqp->ibqp.qp_num;
3058 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
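/* If this was the last QP in the group, destroy the HW multicast group and free its MGN and DMA context; otherwise issue a modify with this QP removed. */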
3059 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
3060 mcast_list_del(mc_qht_elem);
3061 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3062 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3063 IRDMA_OP_MC_DESTROY);
3065 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3066 "failed MC_DESTROY MCG\n");
3067 spin_lock_irqsave(&rf->qh_list_lock, flags);
3068 mcast_list_add(rf, mc_qht_elem);
3069 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3073 irdma_free_dma_mem(&rf->hw,
3074 &mc_qht_elem->mc_grp_ctx.dma_mem_mc);
3075 irdma_free_rsrc(rf, rf->allocated_mcgs,
3076 mc_qht_elem->mc_grp_ctx.mg_id);
3079 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3080 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3081 IRDMA_OP_MC_MODIFY);
3083 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3084 "failed Modify MCG\n");
3093 * irdma_query_ah - Query address handle
3094 * @ibah: pointer to address handle
3095 * @ah_attr: address handle attributes
3098 irdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
3100 struct irdma_ah *ah = to_iwah(ibah);
3102 memset(ah_attr, 0, sizeof(*ah_attr));
3103 if (ah->av.attrs.ah_flags & IB_AH_GRH) {
3104 ah_attr->ah_flags = IB_AH_GRH;
3105 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
3106 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
3107 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
3108 ah_attr->grh.sgid_index = ah->sgid_index;
3110 memcpy(&ah_attr->grh.dgid, &ah->dgid,
3111 sizeof(ah_attr->grh.dgid));
3117 static __be64 irdma_mac_to_guid(struct ifnet *ndev)
{
3118 unsigned char *mac = IF_LLADDR(ndev);
3120 unsigned char *dst = (unsigned char *)&guid;
3122 dst[0] = mac[0] ^ 2;
3134 static struct ifnet *
3135 irdma_get_netdev(struct ib_device *ibdev, u8 port_num)
3137 struct irdma_device *iwdev = to_iwdev(ibdev);
3139 if (iwdev->netdev) {
3140 dev_hold(iwdev->netdev);
3141 return iwdev->netdev;
3148 irdma_set_device_ops(struct ib_device *ibdev)
3150 struct ib_device *dev_ops = ibdev;
3152 dev_ops->ops.driver_id = RDMA_DRIVER_I40IW;
3153 dev_ops->ops.size_ib_ah = IRDMA_SET_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah);
3154 dev_ops->ops.size_ib_cq = IRDMA_SET_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq);
3155 dev_ops->ops.size_ib_pd = IRDMA_SET_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd);
3156 dev_ops->ops.size_ib_ucontext = IRDMA_SET_RDMA_OBJ_SIZE(ib_ucontext,
3160 dev_ops->alloc_hw_stats = irdma_alloc_hw_stats;
3161 dev_ops->alloc_mr = irdma_alloc_mr;
3162 dev_ops->alloc_mw = irdma_alloc_mw;
3163 dev_ops->alloc_pd = irdma_alloc_pd;
3164 dev_ops->alloc_ucontext = irdma_alloc_ucontext;
3165 dev_ops->create_cq = irdma_create_cq;
3166 dev_ops->create_qp = irdma_create_qp;
3167 dev_ops->dealloc_mw = irdma_dealloc_mw;
3168 dev_ops->dealloc_pd = irdma_dealloc_pd;
3169 dev_ops->dealloc_ucontext = irdma_dealloc_ucontext;
3170 dev_ops->dereg_mr = irdma_dereg_mr;
3171 dev_ops->destroy_cq = irdma_destroy_cq;
3172 dev_ops->destroy_qp = irdma_destroy_qp;
3173 dev_ops->disassociate_ucontext = irdma_disassociate_ucontext;
3174 dev_ops->get_dev_fw_str = irdma_get_dev_fw_str;
3175 dev_ops->get_dma_mr = irdma_get_dma_mr;
3176 dev_ops->get_hw_stats = irdma_get_hw_stats;
3177 dev_ops->get_netdev = irdma_get_netdev;
3178 dev_ops->map_mr_sg = irdma_map_mr_sg;
3179 dev_ops->mmap = irdma_mmap;
3180 dev_ops->mmap_free = irdma_mmap_free;
3181 dev_ops->poll_cq = irdma_poll_cq;
3182 dev_ops->post_recv = irdma_post_recv;
3183 dev_ops->post_send = irdma_post_send;
3184 dev_ops->query_device = irdma_query_device;
3185 dev_ops->query_port = irdma_query_port;
3186 dev_ops->modify_port = irdma_modify_port;
3187 dev_ops->query_qp = irdma_query_qp;
3188 dev_ops->reg_user_mr = irdma_reg_user_mr;
3189 dev_ops->req_notify_cq = irdma_req_notify_cq;
3190 dev_ops->resize_cq = irdma_resize_cq;
3194 irdma_set_device_roce_ops(struct ib_device *ibdev)
3196 struct ib_device *dev_ops = ibdev;
3198 dev_ops->attach_mcast = irdma_attach_mcast;
3199 dev_ops->create_ah = irdma_create_ah;
3200 dev_ops->destroy_ah = irdma_destroy_ah;
3201 dev_ops->detach_mcast = irdma_detach_mcast;
3202 dev_ops->get_link_layer = irdma_get_link_layer;
3203 dev_ops->get_port_immutable = irdma_roce_port_immutable;
3204 dev_ops->modify_qp = irdma_modify_qp_roce;
3205 dev_ops->query_ah = irdma_query_ah;
3206 dev_ops->query_gid = irdma_query_gid_roce;
3207 dev_ops->query_pkey = irdma_query_pkey;
3208 ibdev->add_gid = irdma_add_gid;
3209 ibdev->del_gid = irdma_del_gid;
3213 irdma_set_device_iw_ops(struct ib_device *ibdev)
3215 struct ib_device *dev_ops = ibdev;
3217 ibdev->uverbs_cmd_mask |=
3218 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
3219 (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
3221 dev_ops->create_ah = irdma_create_ah_stub;
3222 dev_ops->destroy_ah = irdma_destroy_ah_stub;
3223 dev_ops->get_port_immutable = irdma_iw_port_immutable;
3224 dev_ops->modify_qp = irdma_modify_qp;
3225 dev_ops->query_gid = irdma_query_gid;
3226 dev_ops->query_pkey = irdma_iw_query_pkey;
3230 * irdma_init_roce_device - initialization of roce rdma device
3231 * @iwdev: irdma device
3234 irdma_init_roce_device(struct irdma_device *iwdev)
3236 kc_set_roce_uverbs_cmd_mask(iwdev);
3237 iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
3238 iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
3239 irdma_set_device_roce_ops(&iwdev->ibdev);
3243 * irdma_init_iw_device - initialization of iwarp rdma device
3244 * @iwdev: irdma device
3247 irdma_init_iw_device(struct irdma_device *iwdev)
3249 struct ifnet *netdev = iwdev->netdev;
3251 iwdev->ibdev.node_type = RDMA_NODE_RNIC;
3252 ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, IF_LLADDR(netdev));
3253 iwdev->ibdev.iwcm = kzalloc(sizeof(*iwdev->ibdev.iwcm), GFP_KERNEL);
3254 if (!iwdev->ibdev.iwcm)
3257 iwdev->ibdev.iwcm->add_ref = irdma_qp_add_ref;
3258 iwdev->ibdev.iwcm->rem_ref = irdma_qp_rem_ref;
3259 iwdev->ibdev.iwcm->get_qp = irdma_get_qp;
3260 iwdev->ibdev.iwcm->connect = irdma_connect;
3261 iwdev->ibdev.iwcm->accept = irdma_accept;
3262 iwdev->ibdev.iwcm->reject = irdma_reject;
3263 iwdev->ibdev.iwcm->create_listen = irdma_create_listen;
3264 iwdev->ibdev.iwcm->destroy_listen = irdma_destroy_listen;
3265 memcpy(iwdev->ibdev.iwcm->ifname, if_name(netdev),
3266 sizeof(iwdev->ibdev.iwcm->ifname));
3267 irdma_set_device_iw_ops(&iwdev->ibdev);
3273 * irdma_init_rdma_device - initialization of rdma device
3274 * @iwdev: irdma device
3277 irdma_init_rdma_device(struct irdma_device *iwdev)
3279 struct pci_dev *pcidev = iwdev->rf->pcidev;
3282 iwdev->ibdev.owner = THIS_MODULE;
3283 iwdev->ibdev.uverbs_abi_ver = IRDMA_ABI_VER;
3284 kc_set_rdma_uverbs_cmd_mask(iwdev);
3286 if (iwdev->roce_mode) {
3287 irdma_init_roce_device(iwdev);
3289 ret = irdma_init_iw_device(iwdev);
3293 iwdev->ibdev.phys_port_cnt = 1;
3294 iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
3295 iwdev->ibdev.dev.parent = iwdev->rf->dev_ctx.dev;
3296 set_ibdev_dma_device(iwdev->ibdev, &pcidev->dev);
3297 irdma_set_device_ops(&iwdev->ibdev);
3303 * irdma_port_ibevent - indicate port event
3304 * @iwdev: irdma device
3307 irdma_port_ibevent(struct irdma_device *iwdev)
3309 struct ib_event event;
3311 event.device = &iwdev->ibdev;
3312 event.element.port_num = 1;
3314 iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3315 ib_dispatch_event(&event);
3319 * irdma_ib_unregister_device - unregister rdma device from IB
3321 * @iwdev: irdma device
3324 irdma_ib_unregister_device(struct irdma_device *iwdev)
3326 iwdev->iw_status = 0;
3327 irdma_port_ibevent(iwdev);
3328 ib_unregister_device(&iwdev->ibdev);
3329 kfree(iwdev->ibdev.iwcm);
3330 iwdev->ibdev.iwcm = NULL;
3334 * irdma_ib_register_device - register irdma device to IB core
3335 * @iwdev: irdma device
3338 irdma_ib_register_device(struct irdma_device *iwdev)
3342 ret = irdma_init_rdma_device(iwdev);
3346 sprintf(iwdev->ibdev.name, "irdma-%s", if_name(iwdev->netdev));
3347 ret = ib_register_device(&iwdev->ibdev, NULL);
3351 iwdev->iw_status = 1;
3352 irdma_port_ibevent(iwdev);
3357 kfree(iwdev->ibdev.iwcm);
3358 iwdev->ibdev.iwcm = NULL;
3360 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
3361 "Register RDMA device fail\n");