2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
4 * Copyright (c) 2015 - 2022 Intel Corporation
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenFabrics.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include "irdma_main.h"
39 * irdma_query_device - get device attributes
40 * @ibdev: device pointer from stack
41 * @props: returning device attributes
45 irdma_query_device(struct ib_device *ibdev,
46 struct ib_device_attr *props,
47 struct ib_udata *udata)
49 struct irdma_device *iwdev = to_iwdev(ibdev);
50 struct irdma_pci_f *rf = iwdev->rf;
51 struct pci_dev *pcidev = iwdev->rf->pcidev;
52 struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
54 if (udata->inlen || udata->outlen)
57 memset(props, 0, sizeof(*props));
58 ether_addr_copy((u8 *)&props->sys_image_guid, IF_LLADDR(iwdev->netdev));
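/* fw_ver packs the major version in the upper 32 bits and the minor version in the lower 32 bits */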
59 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
60 irdma_fw_minor_ver(&rf->sc_dev);
61 props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
62 IB_DEVICE_MEM_MGT_EXTENSIONS;
63 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
64 props->vendor_id = pcidev->vendor;
65 props->vendor_part_id = pcidev->device;
66 props->hw_ver = pcidev->revision;
67 props->page_size_cap = hw_attrs->page_size_cap;
68 props->max_mr_size = hw_attrs->max_mr_size;
69 props->max_qp = rf->max_qp - rf->used_qps;
70 props->max_qp_wr = hw_attrs->max_qp_wr;
71 set_max_sge(props, rf);
72 props->max_cq = rf->max_cq - rf->used_cqs;
73 props->max_cqe = rf->max_cqe - 1;
74 props->max_mr = rf->max_mr - rf->used_mrs;
75 props->max_mw = props->max_mr;
76 props->max_pd = rf->max_pd - rf->used_pds;
77 props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
78 props->max_qp_rd_atom = hw_attrs->max_hw_ird;
79 props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
80 if (rdma_protocol_roce(ibdev, 1)) {
81 props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
82 props->max_pkeys = IRDMA_PKEY_TBL_SZ;
83 props->max_ah = rf->max_ah;
84 if (hw_attrs->uk_attrs.hw_rev == IRDMA_GEN_2) {
85 props->max_mcast_grp = rf->max_mcg;
86 props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
87 props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
90 props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
91 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
92 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
98 irdma_mmap_legacy(struct irdma_ucontext *ucontext,
99 struct vm_area_struct *vma)
103 if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
106 vma->vm_private_data = ucontext;
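/* legacy (libi40iw) mode maps exactly one page: the doorbell page, computed from BAR0 plus the doorbell register offset */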
107 pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
108 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
110 #if __FreeBSD_version >= 1400026
111 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
112 pgprot_noncached(vma->vm_page_prot), NULL);
114 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
115 pgprot_noncached(vma->vm_page_prot));
119 #if __FreeBSD_version >= 1400026
121 irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
123 struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
128 struct rdma_user_mmap_entry *
129 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
130 enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
132 struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
138 entry->bar_offset = bar_offset;
139 entry->mmap_flag = mmap_flag;
141 ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
142 &entry->rdma_entry, PAGE_SIZE);
147 *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
149 return &entry->rdma_entry;
154 find_key_in_mmap_tbl(struct irdma_ucontext *ucontext, u64 key)
156 struct irdma_user_mmap_entry *entry;
158 HASH_FOR_EACH_POSSIBLE(ucontext->mmap_hash_tbl, entry, hlist, key) {
159 if (entry->pgoff_key == key)
166 struct irdma_user_mmap_entry *
167 irdma_user_mmap_entry_add_hash(struct irdma_ucontext *ucontext, u64 bar_offset,
168 enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
170 struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
177 entry->bar_offset = bar_offset;
178 entry->mmap_flag = mmap_flag;
179 entry->ucontext = ucontext;
181 get_random_bytes(&entry->pgoff_key, sizeof(entry->pgoff_key));
183 /* The key is a page offset */
184 entry->pgoff_key >>= PAGE_SHIFT;
186 /* In the event of a collision in the hash table, retry with a new key */
187 spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
188 if (!find_key_in_mmap_tbl(ucontext, entry->pgoff_key)) {
189 HASH_ADD(ucontext->mmap_hash_tbl, &entry->hlist, entry->pgoff_key);
190 spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
193 spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
194 } while (retry_cnt++ < 10);
196 irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS, "mmap table add failed: Cannot find a unique key\n");
201 /* libc mmap uses a byte offset */
202 *mmap_offset = entry->pgoff_key << PAGE_SHIFT;
207 static struct irdma_user_mmap_entry *
208 irdma_find_user_mmap_entry(struct irdma_ucontext *ucontext,
209 struct vm_area_struct *vma)
211 struct irdma_user_mmap_entry *entry;
214 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
217 spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
218 HASH_FOR_EACH_POSSIBLE(ucontext->mmap_hash_tbl, entry, hlist, vma->vm_pgoff) {
219 if (entry->pgoff_key == vma->vm_pgoff) {
220 spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
225 spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
231 irdma_user_mmap_entry_del_hash(struct irdma_user_mmap_entry *entry)
233 struct irdma_ucontext *ucontext;
239 ucontext = entry->ucontext;
241 spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
242 HASH_DEL(ucontext->mmap_hash_tbl, &entry->hlist);
243 spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
250 * irdma_mmap - user memory map
251 * @context: context created during alloc
252 * @vma: kernel info for user memory map
255 irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
257 #if __FreeBSD_version >= 1400026
258 struct rdma_user_mmap_entry *rdma_entry;
260 struct irdma_user_mmap_entry *entry;
261 struct irdma_ucontext *ucontext;
265 ucontext = to_ucontext(context);
267 /* Legacy support for libi40iw with hard-coded mmap key */
268 if (ucontext->legacy_mode)
269 return irdma_mmap_legacy(ucontext, vma);
271 #if __FreeBSD_version >= 1400026
272 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
274 irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
275 "pgoff[0x%lx] does not have valid entry\n",
280 entry = to_irdma_mmap_entry(rdma_entry);
282 entry = irdma_find_user_mmap_entry(ucontext, vma);
284 irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
285 "pgoff[0x%lx] does not have valid entry\n",
290 irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
291 "bar_offset [0x%lx] mmap_flag [%d]\n", entry->bar_offset,
294 pfn = (entry->bar_offset +
295 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
297 switch (entry->mmap_flag) {
298 case IRDMA_MMAP_IO_NC:
299 #if __FreeBSD_version >= 1400026
300 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
301 pgprot_noncached(vma->vm_page_prot),
304 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
305 pgprot_noncached(vma->vm_page_prot));
308 case IRDMA_MMAP_IO_WC:
309 #if __FreeBSD_version >= 1400026
310 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
311 pgprot_writecombine(vma->vm_page_prot),
314 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
315 pgprot_writecombine(vma->vm_page_prot));
323 irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
324 "bar_offset [0x%lx] mmap_flag[%d] err[%d]\n",
325 entry->bar_offset, entry->mmap_flag, ret);
326 #if __FreeBSD_version >= 1400026
327 rdma_user_mmap_entry_put(rdma_entry);
334 * irdma_alloc_push_page - allocate a push page for qp
338 irdma_alloc_push_page(struct irdma_qp *iwqp)
340 struct irdma_cqp_request *cqp_request;
341 struct cqp_cmds_info *cqp_info;
342 struct irdma_device *iwdev = iwqp->iwdev;
343 struct irdma_sc_qp *qp = &iwqp->sc_qp;
346 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
350 cqp_info = &cqp_request->info;
351 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
352 cqp_info->post_sq = 1;
353 cqp_info->in.u.manage_push_page.info.push_idx = 0;
354 cqp_info->in.u.manage_push_page.info.qs_handle =
355 qp->vsi->qos[qp->user_pri].qs_handle;
356 cqp_info->in.u.manage_push_page.info.free_page = 0;
357 cqp_info->in.u.manage_push_page.info.push_page_type = 0;
358 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
359 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
361 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
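/* the CQP completion returns the allocated push page index; accept it only if it falls within the device's push page range */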
362 if (!status && cqp_request->compl_info.op_ret_val <
363 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
364 qp->push_idx = cqp_request->compl_info.op_ret_val;
368 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
372 * irdma_get_pbl - Retrieve pbl from a list given a virtual address
374 * @va: user virtual address
375 * @pbl_list: pbl list to search in (QP's or CQ's)
378 irdma_get_pbl(unsigned long va,
379 struct list_head *pbl_list)
381 struct irdma_pbl *iwpbl;
383 list_for_each_entry(iwpbl, pbl_list, list) {
384 if (iwpbl->user_base == va) {
385 list_del(&iwpbl->list);
386 iwpbl->on_list = false;
395 * irdma_clean_cqes - clean cq entries for qp
396 * @iwqp: qp ptr (user or kernel)
400 irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
402 struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
405 spin_lock_irqsave(&iwcq->lock, flags);
406 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
407 spin_unlock_irqrestore(&iwcq->lock, flags);
410 static u64 irdma_compute_push_wqe_offset(struct irdma_device *iwdev, u32 page_idx)
{
411 u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
413 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
414 /* skip over db page */
415 bar_off += IRDMA_HW_PAGE_SIZE;
416 /* skip over reserved space */
417 bar_off += IRDMA_PF_BAR_RSVD;
421 bar_off += (u64)page_idx * IRDMA_HW_PAGE_SIZE;
427 irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
429 if (iwqp->push_db_mmap_entry) {
430 #if __FreeBSD_version >= 1400026
431 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
433 irdma_user_mmap_entry_del_hash(iwqp->push_db_mmap_entry);
435 iwqp->push_db_mmap_entry = NULL;
437 if (iwqp->push_wqe_mmap_entry) {
438 #if __FreeBSD_version >= 1400026
439 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
441 irdma_user_mmap_entry_del_hash(iwqp->push_wqe_mmap_entry);
443 iwqp->push_wqe_mmap_entry = NULL;
448 irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
449 struct irdma_qp *iwqp,
450 u64 *push_wqe_mmap_key,
451 u64 *push_db_mmap_key)
453 struct irdma_device *iwdev = ucontext->iwdev;
456 WARN_ON_ONCE(iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_2);
458 bar_off = irdma_compute_push_wqe_offset(iwdev, iwqp->sc_qp.push_idx);
460 #if __FreeBSD_version >= 1400026
461 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
462 bar_off, IRDMA_MMAP_IO_WC,
465 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
466 IRDMA_MMAP_IO_WC, push_wqe_mmap_key);
468 if (!iwqp->push_wqe_mmap_entry)
471 /* push doorbell page */
472 bar_off += IRDMA_HW_PAGE_SIZE;
473 #if __FreeBSD_version >= 1400026
474 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
475 bar_off, IRDMA_MMAP_IO_NC,
479 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
480 IRDMA_MMAP_IO_NC, push_db_mmap_key);
482 if (!iwqp->push_db_mmap_entry) {
483 #if __FreeBSD_version >= 1400026
484 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
486 irdma_user_mmap_entry_del_hash(iwqp->push_wqe_mmap_entry);
495 * irdma_setup_virt_qp - setup for allocation of virtual qp
496 * @iwdev: irdma device
498 * @init_info: initialize info to return
501 irdma_setup_virt_qp(struct irdma_device *iwdev,
502 struct irdma_qp *iwqp,
503 struct irdma_qp_init_info *init_info)
505 struct irdma_pbl *iwpbl = iwqp->iwpbl;
506 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
508 iwqp->page = qpmr->sq_page;
509 init_info->shadow_area_pa = qpmr->shadow;
510 if (iwpbl->pbl_allocated) {
511 init_info->virtual_map = true;
512 init_info->sq_pa = qpmr->sq_pbl.idx;
513 init_info->rq_pa = qpmr->rq_pbl.idx;
515 init_info->sq_pa = qpmr->sq_pbl.addr;
516 init_info->rq_pa = qpmr->rq_pbl.addr;
521 * irdma_setup_umode_qp - setup sq and rq size in user mode qp
523 * @iwdev: iwarp device
524 * @iwqp: qp ptr (user or kernel)
525 * @info: initialize info to return
526 * @init_attr: Initial QP create attributes
529 irdma_setup_umode_qp(struct ib_udata *udata,
530 struct irdma_device *iwdev,
531 struct irdma_qp *iwqp,
532 struct irdma_qp_init_info *info,
533 struct ib_qp_init_attr *init_attr)
535 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
536 struct irdma_create_qp_req req = {0};
540 ret = ib_copy_from_udata(&req, udata,
541 min(sizeof(req), udata->inlen));
543 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
544 "ib_copy_from_data fail\n");
548 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
550 if (req.user_wqe_bufs) {
551 #if __FreeBSD_version >= 1400026
552 struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
554 struct irdma_ucontext *ucontext = to_ucontext(iwqp->iwpd->ibpd.uobject->context);
556 info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
557 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
558 iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
559 &ucontext->qp_reg_mem_list);
560 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
564 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
570 if (ukinfo->abi_ver <= 5) {
572 * ABI versions less than 6 pass the raw SQ and RQ
573 * quanta in cap.max_send_wr and cap.max_recv_wr.
575 iwqp->max_send_wr = init_attr->cap.max_send_wr;
576 iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
577 ukinfo->sq_size = init_attr->cap.max_send_wr;
578 ukinfo->rq_size = init_attr->cap.max_recv_wr;
579 irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift, &ukinfo->rq_shift);
581 ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
586 ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
591 iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
592 iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
593 ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
594 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
596 irdma_setup_virt_qp(iwdev, iwqp, info);
602 * irdma_setup_kmode_qp - setup initialization for kernel mode qp
603 * @iwdev: iwarp device
604 * @iwqp: qp ptr (user or kernel)
605 * @info: initialize info to return
606 * @init_attr: Initial QP create attributes
609 irdma_setup_kmode_qp(struct irdma_device *iwdev,
610 struct irdma_qp *iwqp,
611 struct irdma_qp_init_info *info,
612 struct ib_qp_init_attr *init_attr)
614 struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
617 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
619 status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
624 status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
629 iwqp->kqp.sq_wrid_mem =
630 kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
631 if (!iwqp->kqp.sq_wrid_mem)
634 iwqp->kqp.rq_wrid_mem =
635 kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
636 if (!iwqp->kqp.rq_wrid_mem) {
637 kfree(iwqp->kqp.sq_wrid_mem);
638 iwqp->kqp.sq_wrid_mem = NULL;
642 iwqp->kqp.sig_trk_mem = kcalloc(ukinfo->sq_depth, sizeof(u32), GFP_KERNEL);
644 if (!iwqp->kqp.sig_trk_mem) {
645 kfree(iwqp->kqp.sq_wrid_mem);
646 iwqp->kqp.sq_wrid_mem = NULL;
647 kfree(iwqp->kqp.rq_wrid_mem);
648 iwqp->kqp.rq_wrid_mem = NULL;
651 ukinfo->sq_sigwrtrk_array = (void *)iwqp->kqp.sig_trk_mem;
652 ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
653 ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
655 size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
656 size += (IRDMA_SHADOW_AREA_SIZE << 3);
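/* single DMA allocation laid out as SQ WQEs, then RQ WQEs, then the shadow area */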
659 mem->va = irdma_allocate_dma_mem(&iwdev->rf->hw, mem, mem->size,
662 kfree(iwqp->kqp.sq_wrid_mem);
663 iwqp->kqp.sq_wrid_mem = NULL;
664 kfree(iwqp->kqp.rq_wrid_mem);
665 iwqp->kqp.rq_wrid_mem = NULL;
669 ukinfo->sq = mem->va;
670 info->sq_pa = mem->pa;
671 ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
672 info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
673 ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
674 info->shadow_area_pa = info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
675 ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
676 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
677 ukinfo->qp_id = iwqp->ibqp.qp_num;
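/* usable WR counts exclude the reserved WQEs and are scaled down by the per-WR quanta shift */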
679 iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
680 iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
681 init_attr->cap.max_send_wr = iwqp->max_send_wr;
682 init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
688 irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
690 struct irdma_pci_f *rf = iwqp->iwdev->rf;
691 struct irdma_cqp_request *cqp_request;
692 struct cqp_cmds_info *cqp_info;
693 struct irdma_create_qp_info *qp_info;
696 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
700 cqp_info = &cqp_request->info;
701 qp_info = &cqp_request->info.in.u.qp_create.info;
702 memset(qp_info, 0, sizeof(*qp_info));
703 qp_info->mac_valid = true;
704 qp_info->cq_num_valid = true;
705 qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
707 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
708 cqp_info->post_sq = 1;
709 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
710 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
711 status = irdma_handle_cqp_op(rf, cqp_request);
712 irdma_put_cqp_request(&rf->cqp, cqp_request);
718 irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
719 struct irdma_qp_host_ctx_info *ctx_info)
721 struct irdma_device *iwdev = iwqp->iwdev;
722 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
723 struct irdma_roce_offload_info *roce_info;
724 struct irdma_udp_offload_info *udp_info;
726 udp_info = &iwqp->udp_info;
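/* round the VSI MTU down to a valid IB path MTU for the RoCE connection */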
727 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
728 udp_info->cwnd = iwdev->roce_cwnd;
729 udp_info->rexmit_thresh = 2;
730 udp_info->rnr_nak_thresh = 2;
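/* RoCEv2 runs over UDP: the source port defaults to the start of the dynamic range (0xc000), the destination port is the well-known 4791 */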
731 udp_info->src_port = 0xc000;
732 udp_info->dst_port = ROCE_V2_UDP_DPORT;
733 roce_info = &iwqp->roce_info;
734 ether_addr_copy(roce_info->mac_addr, IF_LLADDR(iwdev->netdev));
736 roce_info->rd_en = true;
737 roce_info->wr_rdresp_en = true;
738 roce_info->bind_en = true;
739 roce_info->dcqcn_en = false;
740 roce_info->rtomin = 5;
742 roce_info->ack_credits = iwdev->roce_ackcreds;
743 roce_info->ird_size = dev->hw_attrs.max_hw_ird;
744 roce_info->ord_size = dev->hw_attrs.max_hw_ord;
746 if (!iwqp->user_mode) {
747 roce_info->priv_mode_en = true;
748 roce_info->fast_reg_en = true;
749 roce_info->udprivcq_en = true;
751 roce_info->roce_tver = 0;
753 ctx_info->roce_info = &iwqp->roce_info;
754 ctx_info->udp_info = &iwqp->udp_info;
755 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
759 irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
760 struct irdma_qp_host_ctx_info *ctx_info)
762 struct irdma_device *iwdev = iwqp->iwdev;
763 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
764 struct irdma_iwarp_offload_info *iwarp_info;
766 iwarp_info = &iwqp->iwarp_info;
767 ether_addr_copy(iwarp_info->mac_addr, IF_LLADDR(iwdev->netdev));
768 iwarp_info->rd_en = true;
769 iwarp_info->wr_rdresp_en = true;
770 iwarp_info->bind_en = true;
771 iwarp_info->ecn_en = true;
772 iwarp_info->rtomin = 5;
774 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
775 iwarp_info->ib_rd_en = true;
776 if (!iwqp->user_mode) {
777 iwarp_info->priv_mode_en = true;
778 iwarp_info->fast_reg_en = true;
780 iwarp_info->ddp_ver = 1;
781 iwarp_info->rdmap_ver = 1;
783 ctx_info->iwarp_info = &iwqp->iwarp_info;
784 ctx_info->iwarp_info_valid = true;
785 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
786 ctx_info->iwarp_info_valid = false;
790 irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
791 struct irdma_device *iwdev)
793 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
794 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
796 if (init_attr->create_flags)
799 if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
800 init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
801 init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
804 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
805 if (init_attr->qp_type != IB_QPT_RC &&
806 init_attr->qp_type != IB_QPT_UD &&
807 init_attr->qp_type != IB_QPT_GSI)
810 if (init_attr->qp_type != IB_QPT_RC)
818 irdma_sched_qp_flush_work(struct irdma_qp *iwqp)
820 irdma_qp_add_ref(&iwqp->ibqp);
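/* hold a QP reference for the delayed flush work; if the work was already queued, mod_delayed_work() returns true and the extra reference is dropped */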
821 if (mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
822 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)))
823 irdma_qp_rem_ref(&iwqp->ibqp);
827 irdma_flush_worker(struct work_struct *work)
829 struct delayed_work *dwork = to_delayed_work(work);
830 struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
832 irdma_generate_flush_completions(iwqp);
833 /* Drop the reference taken in irdma_sched_qp_flush_work */
834 irdma_qp_rem_ref(&iwqp->ibqp);
838 irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
842 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
843 if (iwqp->roce_info.wr_rdresp_en) {
844 acc_flags |= IB_ACCESS_LOCAL_WRITE;
845 acc_flags |= IB_ACCESS_REMOTE_WRITE;
847 if (iwqp->roce_info.rd_en)
848 acc_flags |= IB_ACCESS_REMOTE_READ;
849 if (iwqp->roce_info.bind_en)
850 acc_flags |= IB_ACCESS_MW_BIND;
852 if (iwqp->iwarp_info.wr_rdresp_en) {
853 acc_flags |= IB_ACCESS_LOCAL_WRITE;
854 acc_flags |= IB_ACCESS_REMOTE_WRITE;
856 if (iwqp->iwarp_info.rd_en)
857 acc_flags |= IB_ACCESS_REMOTE_READ;
858 if (iwqp->iwarp_info.bind_en)
859 acc_flags |= IB_ACCESS_MW_BIND;
865 * irdma_query_qp - query qp attributes
867 * @attr: attributes pointer
868 * @attr_mask: Not used
869 * @init_attr: qp attributes to return
872 irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
873 int attr_mask, struct ib_qp_init_attr *init_attr)
875 struct irdma_qp *iwqp = to_iwqp(ibqp);
876 struct irdma_sc_qp *qp = &iwqp->sc_qp;
878 memset(attr, 0, sizeof(*attr));
879 memset(init_attr, 0, sizeof(*init_attr));
881 attr->qp_state = iwqp->ibqp_state;
882 attr->cur_qp_state = iwqp->ibqp_state;
883 attr->cap.max_send_wr = iwqp->max_send_wr;
884 attr->cap.max_recv_wr = iwqp->max_recv_wr;
885 attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
886 attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
887 attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
888 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
890 if (rdma_protocol_roce(ibqp->device, 1)) {
891 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
892 attr->qkey = iwqp->roce_info.qkey;
893 attr->rq_psn = iwqp->udp_info.epsn;
894 attr->sq_psn = iwqp->udp_info.psn_nxt;
895 attr->dest_qp_num = iwqp->roce_info.dest_qp;
896 attr->pkey_index = iwqp->roce_info.p_key;
897 attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
898 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
899 attr->max_rd_atomic = iwqp->roce_info.ord_size;
900 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
903 init_attr->event_handler = iwqp->ibqp.event_handler;
904 init_attr->qp_context = iwqp->ibqp.qp_context;
905 init_attr->send_cq = iwqp->ibqp.send_cq;
906 init_attr->recv_cq = iwqp->ibqp.recv_cq;
907 init_attr->cap = attr->cap;
913 * irdma_modify_qp_roce - modify qp request
914 * @ibqp: qp's pointer for modify
915 * @attr: access attributes
916 * @attr_mask: state mask
920 irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
921 int attr_mask, struct ib_udata *udata)
923 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
924 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
925 struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
926 struct irdma_qp *iwqp = to_iwqp(ibqp);
927 struct irdma_device *iwdev = iwqp->iwdev;
928 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
929 struct irdma_qp_host_ctx_info *ctx_info;
930 struct irdma_roce_offload_info *roce_info;
931 struct irdma_udp_offload_info *udp_info;
932 struct irdma_modify_qp_info info = {0};
933 struct irdma_modify_qp_resp uresp = {};
934 struct irdma_modify_qp_req ureq;
936 u8 issue_modify_qp = 0;
939 ctx_info = &iwqp->ctx_info;
940 roce_info = &iwqp->roce_info;
941 udp_info = &iwqp->udp_info;
944 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
945 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
949 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
952 if (attr_mask & IB_QP_DEST_QPN)
953 roce_info->dest_qp = attr->dest_qp_num;
955 if (attr_mask & IB_QP_PKEY_INDEX) {
956 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
962 if (attr_mask & IB_QP_QKEY)
963 roce_info->qkey = attr->qkey;
965 if (attr_mask & IB_QP_PATH_MTU)
966 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
968 if (attr_mask & IB_QP_SQ_PSN) {
969 udp_info->psn_nxt = attr->sq_psn;
970 udp_info->lsn = 0xffff;
971 udp_info->psn_una = attr->sq_psn;
972 udp_info->psn_max = attr->sq_psn;
975 if (attr_mask & IB_QP_RQ_PSN)
976 udp_info->epsn = attr->rq_psn;
978 if (attr_mask & IB_QP_RNR_RETRY)
979 udp_info->rnr_nak_thresh = attr->rnr_retry;
981 if (attr_mask & IB_QP_RETRY_CNT)
982 udp_info->rexmit_thresh = attr->retry_cnt;
984 ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
986 if (attr_mask & IB_QP_AV) {
987 struct irdma_av *av = &iwqp->roce_ah.av;
988 u16 vlan_id = VLAN_N_VID;
989 u32 local_ip[4] = {};
991 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
992 if (attr->ah_attr.ah_flags & IB_AH_GRH) {
993 udp_info->ttl = attr->ah_attr.grh.hop_limit;
994 udp_info->flow_label = attr->ah_attr.grh.flow_label;
995 udp_info->tos = attr->ah_attr.grh.traffic_class;
997 udp_info->src_port = kc_rdma_get_udp_sport(udp_info->flow_label,
1001 irdma_qp_rem_qos(&iwqp->sc_qp);
1002 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
1003 if (iwqp->sc_qp.vsi->dscp_mode)
1004 ctx_info->user_pri =
1005 iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
1007 ctx_info->user_pri = rt_tos2priority(udp_info->tos);
1009 ret = kc_irdma_set_roce_cm_info(iwqp, attr, &vlan_id);
1012 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
1014 iwqp->sc_qp.user_pri = ctx_info->user_pri;
1015 irdma_qp_add_qos(&iwqp->sc_qp);
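/* when a VLAN is in use, carry the user priority in the VLAN tag's PCP bits */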
1017 if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
1019 if (vlan_id < VLAN_N_VID) {
1020 udp_info->insert_vlan_tag = true;
1021 udp_info->vlan_tag = vlan_id |
1022 ctx_info->user_pri << VLAN_PRIO_SHIFT;
1024 udp_info->insert_vlan_tag = false;
1027 av->attrs = attr->ah_attr;
1028 rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
1029 if (av->sgid_addr.saddr.sa_family == AF_INET6) {
1031 av->dgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
1033 av->sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
1035 irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
1036 irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
1038 udp_info->ipv4 = false;
1039 irdma_copy_ip_ntohl(local_ip, daddr);
1041 udp_info->arp_idx = irdma_arp_table(iwdev->rf, local_ip,
1042 NULL, IRDMA_ARP_RESOLVE);
1044 __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
1045 __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
1047 local_ip[0] = ntohl(daddr);
1049 udp_info->ipv4 = true;
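/* IPv4 addresses occupy only the last word of the 4-word address arrays */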
1050 udp_info->dest_ip_addr[0] = 0;
1051 udp_info->dest_ip_addr[1] = 0;
1052 udp_info->dest_ip_addr[2] = 0;
1053 udp_info->dest_ip_addr[3] = local_ip[0];
1055 udp_info->local_ipaddr[0] = 0;
1056 udp_info->local_ipaddr[1] = 0;
1057 udp_info->local_ipaddr[2] = 0;
1058 udp_info->local_ipaddr[3] = ntohl(saddr);
1061 irdma_add_arp(iwdev->rf, local_ip,
1062 ah_attr_to_dmac(attr->ah_attr));
1065 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1066 if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
1067 ibdev_err(&iwdev->ibdev,
1068 "rd_atomic = %d, above max_hw_ord=%d\n",
1069 attr->max_rd_atomic,
1070 dev->hw_attrs.max_hw_ord);
1073 if (attr->max_rd_atomic)
1074 roce_info->ord_size = attr->max_rd_atomic;
1075 info.ord_valid = true;
1078 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1079 if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
1080 ibdev_err(&iwdev->ibdev,
1081 "rd_atomic = %d, above max_hw_ird=%d\n",
1082 attr->max_rd_atomic,
1083 dev->hw_attrs.max_hw_ird);
1086 if (attr->max_dest_rd_atomic)
1087 roce_info->ird_size = attr->max_dest_rd_atomic;
1090 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1091 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1092 roce_info->wr_rdresp_en = true;
1093 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1094 roce_info->wr_rdresp_en = true;
1095 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1096 roce_info->rd_en = true;
1099 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1101 irdma_debug(dev, IRDMA_DEBUG_VERBS,
1102 "caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
1103 __builtin_return_address(0), ibqp->qp_num, attr->qp_state, iwqp->ibqp_state,
1104 iwqp->iwarp_state, attr_mask);
1106 spin_lock_irqsave(&iwqp->lock, flags);
1107 if (attr_mask & IB_QP_STATE) {
1108 if (!kc_ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
1109 iwqp->ibqp.qp_type, attr_mask,
1110 IB_LINK_LAYER_ETHERNET)) {
1111 irdma_print("modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
1112 iwqp->ibqp.qp_num, iwqp->ibqp_state,
1117 info.curr_iwarp_state = iwqp->iwarp_state;
1119 switch (attr->qp_state) {
1121 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1126 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1127 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1128 issue_modify_qp = 1;
1132 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1136 info.arp_cache_idx_valid = true;
1137 info.cq_num_valid = true;
1138 info.next_iwarp_state = IRDMA_QP_STATE_RTR;
1139 issue_modify_qp = 1;
1142 if (iwqp->ibqp_state < IB_QPS_RTR ||
1143 iwqp->ibqp_state == IB_QPS_ERR) {
1148 info.arp_cache_idx_valid = true;
1149 info.cq_num_valid = true;
1150 info.ord_valid = true;
1151 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1152 issue_modify_qp = 1;
1153 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
1154 iwdev->rf->check_fc(&iwdev->vsi, &iwqp->sc_qp);
1155 udp_info->cwnd = iwdev->roce_cwnd;
1156 roce_info->ack_credits = iwdev->roce_ackcreds;
1157 if (iwdev->push_mode && udata &&
1158 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1159 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1160 spin_unlock_irqrestore(&iwqp->lock, flags);
1161 irdma_alloc_push_page(iwqp);
1162 spin_lock_irqsave(&iwqp->lock, flags);
1166 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
1169 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
1174 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1175 issue_modify_qp = 1;
1180 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
1181 spin_unlock_irqrestore(&iwqp->lock, flags);
1182 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1183 irdma_hw_modify_qp(iwdev, iwqp, &info, true);
1184 spin_lock_irqsave(&iwqp->lock, flags);
1187 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1188 spin_unlock_irqrestore(&iwqp->lock, flags);
1189 if (udata && udata->inlen) {
1190 if (ib_copy_from_udata(&ureq, udata,
1191 min(sizeof(ureq), udata->inlen)))
1194 irdma_flush_wqes(iwqp,
1195 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1196 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1202 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1203 issue_modify_qp = 1;
1210 iwqp->ibqp_state = attr->qp_state;
1213 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1214 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1215 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1216 spin_unlock_irqrestore(&iwqp->lock, flags);
1218 if (attr_mask & IB_QP_STATE) {
1219 if (issue_modify_qp) {
1220 ctx_info->rem_endpoint_idx = udp_info->arp_idx;
1221 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1223 spin_lock_irqsave(&iwqp->lock, flags);
1224 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1225 iwqp->iwarp_state = info.next_iwarp_state;
1226 iwqp->ibqp_state = attr->qp_state;
1228 if (iwqp->ibqp_state > IB_QPS_RTS &&
1229 !iwqp->flush_issued) {
1230 spin_unlock_irqrestore(&iwqp->lock, flags);
1231 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
1234 iwqp->flush_issued = 1;
1237 spin_unlock_irqrestore(&iwqp->lock, flags);
1240 iwqp->ibqp_state = attr->qp_state;
1242 if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1243 struct irdma_ucontext *ucontext;
1245 #if __FreeBSD_version >= 1400026
1246 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1248 ucontext = to_ucontext(ibqp->uobject->context);
1250 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1251 !iwqp->push_wqe_mmap_entry &&
1252 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1253 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1254 uresp.push_valid = 1;
1255 uresp.push_offset = iwqp->sc_qp.push_offset;
1257 uresp.rd_fence_rate = iwdev->rd_fence_rate;
1258 ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1261 irdma_remove_push_mmap_entries(iwqp);
1262 irdma_debug(iwdev_to_idev(iwdev),
1264 "copy_to_udata failed\n");
1272 spin_unlock_irqrestore(&iwqp->lock, flags);
1278 * irdma_modify_qp - modify qp request
1279 * @ibqp: qp's pointer for modify
1280 * @attr: access attributes
1281 * @attr_mask: state mask
1285 irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1286 struct ib_udata *udata)
1288 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1289 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1290 struct irdma_qp *iwqp = to_iwqp(ibqp);
1291 struct irdma_device *iwdev = iwqp->iwdev;
1292 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1293 struct irdma_qp_host_ctx_info *ctx_info;
1294 struct irdma_tcp_offload_info *tcp_info;
1295 struct irdma_iwarp_offload_info *offload_info;
1296 struct irdma_modify_qp_info info = {0};
1297 struct irdma_modify_qp_resp uresp = {};
1298 struct irdma_modify_qp_req ureq = {};
1299 u8 issue_modify_qp = 0;
1302 unsigned long flags;
1305 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1306 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1310 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1313 ctx_info = &iwqp->ctx_info;
1314 offload_info = &iwqp->iwarp_info;
1315 tcp_info = &iwqp->tcp_info;
1316 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1317 irdma_debug(dev, IRDMA_DEBUG_VERBS,
1318 "caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
1319 __builtin_return_address(0), ibqp->qp_num, attr->qp_state, iwqp->ibqp_state, iwqp->iwarp_state,
1320 iwqp->last_aeq, iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1322 spin_lock_irqsave(&iwqp->lock, flags);
1323 if (attr_mask & IB_QP_STATE) {
1324 info.curr_iwarp_state = iwqp->iwarp_state;
1325 switch (attr->qp_state) {
1328 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1333 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1334 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1335 issue_modify_qp = 1;
1337 if (iwdev->push_mode && udata &&
1338 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1339 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1340 spin_unlock_irqrestore(&iwqp->lock, flags);
1341 irdma_alloc_push_page(iwqp);
1342 spin_lock_irqsave(&iwqp->lock, flags);
1346 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1352 issue_modify_qp = 1;
1353 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1354 iwqp->hte_added = 1;
1355 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1356 info.tcp_ctx_valid = true;
1357 info.ord_valid = true;
1358 info.arp_cache_idx_valid = true;
1359 info.cq_num_valid = true;
1362 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1367 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1368 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1373 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1378 info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
1379 issue_modify_qp = 1;
1382 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1387 info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
1388 issue_modify_qp = 1;
1392 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1393 spin_unlock_irqrestore(&iwqp->lock, flags);
1394 if (udata && udata->inlen) {
1395 if (ib_copy_from_udata(&ureq, udata,
1396 min(sizeof(ureq), udata->inlen)))
1399 irdma_flush_wqes(iwqp,
1400 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1401 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1407 if (iwqp->sc_qp.term_flags) {
1408 spin_unlock_irqrestore(&iwqp->lock, flags);
1409 irdma_terminate_del_timer(&iwqp->sc_qp);
1410 spin_lock_irqsave(&iwqp->lock, flags);
1412 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1413 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1415 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1416 info.reset_tcp_conn = true;
1420 issue_modify_qp = 1;
1421 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1428 iwqp->ibqp_state = attr->qp_state;
1430 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1431 ctx_info->iwarp_info_valid = true;
1432 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1433 offload_info->wr_rdresp_en = true;
1434 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1435 offload_info->wr_rdresp_en = true;
1436 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1437 offload_info->rd_en = true;
1440 if (ctx_info->iwarp_info_valid) {
1441 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1442 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1443 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1445 spin_unlock_irqrestore(&iwqp->lock, flags);
1447 if (attr_mask & IB_QP_STATE) {
1448 if (issue_modify_qp) {
1449 ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1450 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1454 spin_lock_irqsave(&iwqp->lock, flags);
1455 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1456 iwqp->iwarp_state = info.next_iwarp_state;
1457 iwqp->ibqp_state = attr->qp_state;
1459 spin_unlock_irqrestore(&iwqp->lock, flags);
1462 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1464 if (iwqp->hw_tcp_state) {
1465 spin_lock_irqsave(&iwqp->lock, flags);
1466 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1467 iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1468 spin_unlock_irqrestore(&iwqp->lock, flags);
1470 irdma_cm_disconn(iwqp);
1472 int close_timer_started;
1474 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1476 if (iwqp->cm_node) {
1477 atomic_inc(&iwqp->cm_node->refcnt);
1478 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1479 close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1480 if (iwqp->cm_id && close_timer_started == 1)
1481 irdma_schedule_cm_timer(iwqp->cm_node,
1482 (struct irdma_puda_buf *)iwqp,
1483 IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1485 irdma_rem_ref_cm_node(iwqp->cm_node);
1487 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1491 if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
1492 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1493 struct irdma_ucontext *ucontext;
1495 #if __FreeBSD_version >= 1400026
1496 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1498 ucontext = to_ucontext(ibqp->uobject->context);
1500 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1501 !iwqp->push_wqe_mmap_entry &&
1502 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1503 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1504 uresp.push_valid = 1;
1505 uresp.push_offset = iwqp->sc_qp.push_offset;
1507 uresp.rd_fence_rate = iwdev->rd_fence_rate;
1509 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1512 irdma_remove_push_mmap_entries(iwqp);
1513 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
1514 "copy_to_udata failed\n");
1521 spin_unlock_irqrestore(&iwqp->lock, flags);
1527 * irdma_cq_free_rsrc - free up resources for cq
1528 * @rf: RDMA PCI function
1532 irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1534 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1536 if (!iwcq->user_mode) {
1537 irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem);
1538 irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem_shadow);
1541 irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1545 * irdma_free_cqbuf - worker to free a cq buffer
1546 * @work: provides access to the cq buffer to free
1549 irdma_free_cqbuf(struct work_struct *work)
1551 struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1553 irdma_free_dma_mem(cq_buf->hw, &cq_buf->kmem_buf);
1558 * irdma_process_resize_list - remove resized cq buffers from the resize_list
1559 * @iwcq: cq which owns the resize_list
1560 * @iwdev: irdma device
1561 * @lcqe_buf: the buffer where the last cqe is received
1564 irdma_process_resize_list(struct irdma_cq *iwcq,
1565 struct irdma_device *iwdev,
1566 struct irdma_cq_buf *lcqe_buf)
1568 struct list_head *tmp_node, *list_node;
1569 struct irdma_cq_buf *cq_buf;
1572 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1573 cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1574 if (cq_buf == lcqe_buf)
1577 list_del(&cq_buf->list);
1578 queue_work(iwdev->cleanup_wq, &cq_buf->work);
1586 * irdma_resize_cq - resize cq
1587 * @ibcq: cq to be resized
1588 * @entries: desired cq size
1592 irdma_resize_cq(struct ib_cq *ibcq, int entries,
1593 struct ib_udata *udata)
1595 #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
1596 struct irdma_cq *iwcq = to_iwcq(ibcq);
1597 struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
1598 struct irdma_cqp_request *cqp_request;
1599 struct cqp_cmds_info *cqp_info;
1600 struct irdma_modify_cq_info *m_info;
1601 struct irdma_modify_cq_info info = {0};
1602 struct irdma_dma_mem kmem_buf;
1603 struct irdma_cq_mr *cqmr_buf;
1604 struct irdma_pbl *iwpbl_buf;
1605 struct irdma_device *iwdev;
1606 struct irdma_pci_f *rf;
1607 struct irdma_cq_buf *cq_buf = NULL;
1608 unsigned long flags;
1611 iwdev = to_iwdev(ibcq->device);
1614 if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1615 IRDMA_FEATURE_CQ_RESIZE))
1618 if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
1621 if (entries > rf->max_cqe)
1624 if (!iwcq->user_mode) {
1626 if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1630 info.cq_size = max(entries, 4);
1632 if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
1636 struct irdma_resize_cq_req req = {};
1637 struct irdma_ucontext *ucontext =
1638 #if __FreeBSD_version >= 1400026
1639 rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1641 to_ucontext(ibcq->uobject->context);
1644 /* CQ resize not supported with legacy GEN_1 libi40iw */
1645 if (ucontext->legacy_mode)
1648 if (ib_copy_from_udata(&req, udata,
1649 min(sizeof(req), udata->inlen)))
1652 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1653 iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
1654 &ucontext->cq_reg_mem_list);
1655 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1660 cqmr_buf = &iwpbl_buf->cq_mr;
1661 if (iwpbl_buf->pbl_allocated) {
1662 info.virtual_map = true;
1663 info.pbl_chunk_size = 1;
1664 info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
1666 info.cq_pa = cqmr_buf->cq_pbl.addr;
1669 /* Kmode CQ resize */
1672 rsize = info.cq_size * sizeof(struct irdma_cqe);
1673 kmem_buf.size = round_up(rsize, 256);
1674 kmem_buf.va = irdma_allocate_dma_mem(dev->hw, &kmem_buf,
1675 kmem_buf.size, 256);
1679 info.cq_base = kmem_buf.va;
1680 info.cq_pa = kmem_buf.pa;
1681 cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
1688 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1694 info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
1695 info.cq_resize = true;
1697 cqp_info = &cqp_request->info;
1698 m_info = &cqp_info->in.u.cq_modify.info;
1699 memcpy(m_info, &info, sizeof(*m_info));
1701 cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
1702 cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
1703 cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
1704 cqp_info->post_sq = 1;
1705 ret = irdma_handle_cqp_op(rf, cqp_request);
1706 irdma_put_cqp_request(&rf->cqp, cqp_request);
1710 spin_lock_irqsave(&iwcq->lock, flags);
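/* for kernel CQs, park the old buffer on resize_list; it is freed later from the cleanup workqueue by irdma_free_cqbuf() */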
1712 cq_buf->kmem_buf = iwcq->kmem;
1713 cq_buf->hw = dev->hw;
1714 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
1715 INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
1716 list_add_tail(&cq_buf->list, &iwcq->resize_list);
1717 iwcq->kmem = kmem_buf;
1720 irdma_sc_cq_resize(&iwcq->sc_cq, &info);
1721 ibcq->cqe = info.cq_size - 1;
1722 spin_unlock_irqrestore(&iwcq->lock, flags);
1727 irdma_free_dma_mem(dev->hw, &kmem_buf);
1734 * irdma_get_mr_access - get hw MR access permissions from IB access flags
1735 * @access: IB access flags
1737 static inline u16 irdma_get_mr_access(int access)
{
1740 hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
1741 IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
1742 hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
1743 IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
1744 hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
1745 IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
1746 hw_access |= (access & IB_ACCESS_MW_BIND) ?
1747 IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
1748 hw_access |= (access & IB_ZERO_BASED) ?
1749 IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
1750 hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
1756 * irdma_free_stag - free stag resource
1757 * @iwdev: irdma device
1758 * @stag: stag to free
1761 irdma_free_stag(struct irdma_device *iwdev, u32 stag)
1765 stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
1766 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
1770 * irdma_create_stag - create random stag
1771 * @iwdev: irdma device
1774 irdma_create_stag(struct irdma_device *iwdev)
1778 u32 next_stag_index;
1784 get_random_bytes(&random, sizeof(random));
1785 consumer_key = (u8)random;
1787 driver_key = random & ~iwdev->rf->mr_stagmask;
1788 next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
1789 next_stag_index %= iwdev->rf->max_mr;
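/* allocate a free MR index, then build the STag with the index in the upper bits and the random 8-bit consumer key in the low byte */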
1791 ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
1792 iwdev->rf->max_mr, &stag_index,
1796 stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
1798 stag += (u32)consumer_key;
1804 * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
1805 * @arr: lvl1 pbl array
1806 * @npages: page count
1807 * @pg_size: page size
1811 irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
1815 for (pg_idx = 0; pg_idx < npages; pg_idx++) {
1816 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
1824 * irdma_check_mr_contiguous - check if MR is physically contiguous
1825 * @palloc: pbl allocation struct
1826 * @pg_size: page size
1829 irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
1832 struct irdma_pble_level2 *lvl2 = &palloc->level2;
1833 struct irdma_pble_info *leaf = lvl2->leaf;
1835 u64 *start_addr = NULL;
1839 if (palloc->level == PBLE_LEVEL_1) {
1840 arr = palloc->level1.addr;
1841 ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
1846 start_addr = leaf->addr;
1848 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
1850 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
1852 ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
1861 * irdma_setup_pbles - copy user page addresses to pbles
1862 * @rf: RDMA PCI function
1863 * @iwmr: mr pointer for this memory registration
1864 * @use_pbles: flag to use pbles
1865 * @lvl_1_only: request only level 1 pble if true
1868 irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
1869 bool use_pbles, bool lvl_1_only)
1871 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1872 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1873 struct irdma_pble_info *pinfo;
1876 enum irdma_pble_level level = PBLE_LEVEL_1;
1879 status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
1884 iwpbl->pbl_allocated = true;
1885 level = palloc->level;
1886 pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
1887 palloc->level2.leaf;
1890 pbl = iwmr->pgaddrmem;
1893 irdma_copy_user_pgaddrs(iwmr, pbl, level);
1896 iwmr->pgaddrmem[0] = *pbl;
1902 * irdma_handle_q_mem - handle memory for qp and cq
1903 * @iwdev: irdma device
1904 * @req: information for q memory management
1905 * @iwpbl: pble struct
1906 * @use_pbles: flag to use pble
1909 irdma_handle_q_mem(struct irdma_device *iwdev,
1910 struct irdma_mem_reg_req *req,
1911 struct irdma_pbl *iwpbl, bool use_pbles)
1913 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1914 struct irdma_mr *iwmr = iwpbl->iwmr;
1915 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
1916 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
1917 struct irdma_hmc_pble *hmc_p;
1918 u64 *arr = iwmr->pgaddrmem;
1923 pg_size = iwmr->page_size;
1924 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true);
1929 arr = palloc->level1.addr;
1931 switch (iwmr->type) {
1932 case IRDMA_MEMREG_TYPE_QP:
1933 total = req->sq_pages + req->rq_pages;
1934 hmc_p = &qpmr->sq_pbl;
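/* the shadow area page immediately follows the SQ and RQ pages */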
1935 qpmr->shadow = (dma_addr_t) arr[total];
1937 ret = irdma_check_mem_contiguous(arr, req->sq_pages,
1940 ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
1946 hmc_p->idx = palloc->level1.idx;
1947 hmc_p = &qpmr->rq_pbl;
1948 hmc_p->idx = palloc->level1.idx + req->sq_pages;
1950 hmc_p->addr = arr[0];
1951 hmc_p = &qpmr->rq_pbl;
1952 hmc_p->addr = arr[req->sq_pages];
1955 case IRDMA_MEMREG_TYPE_CQ:
1956 hmc_p = &cqmr->cq_pbl;
1959 cqmr->shadow = (dma_addr_t) arr[req->cq_pages];
1962 ret = irdma_check_mem_contiguous(arr, req->cq_pages,
1966 hmc_p->idx = palloc->level1.idx;
1968 hmc_p->addr = arr[0];
1971 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
1976 if (use_pbles && ret) {
1977 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
1978 iwpbl->pbl_allocated = false;
1985 * irdma_hw_alloc_mw - create the hw memory window
1986 * @iwdev: irdma device
1987 * @iwmr: pointer to memory window info
1990 irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
1992 struct irdma_mw_alloc_info *info;
1993 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1994 struct irdma_cqp_request *cqp_request;
1995 struct cqp_cmds_info *cqp_info;
1998 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2002 cqp_info = &cqp_request->info;
2003 info = &cqp_info->in.u.mw_alloc.info;
2004 memset(info, 0, sizeof(*info));
2005 if (iwmr->ibmw.type == IB_MW_TYPE_1)
2006 info->mw_wide = true;
2008 info->page_size = PAGE_SIZE;
2009 info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2010 info->pd_id = iwpd->sc_pd.pd_id;
2011 info->remote_access = true;
2012 cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
2013 cqp_info->post_sq = 1;
2014 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
2015 cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
2016 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2017 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2023 * irdma_dealloc_mw - Dealloc memory window
2024 * @ibmw: memory window structure.
2027 irdma_dealloc_mw(struct ib_mw *ibmw)
2029 struct ib_pd *ibpd = ibmw->pd;
2030 struct irdma_pd *iwpd = to_iwpd(ibpd);
2031 struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
2032 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2033 struct irdma_cqp_request *cqp_request;
2034 struct cqp_cmds_info *cqp_info;
2035 struct irdma_dealloc_stag_info *info;
2037 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2041 cqp_info = &cqp_request->info;
2042 info = &cqp_info->in.u.dealloc_stag.info;
2043 memset(info, 0, sizeof(*info));
2044 info->pd_id = iwpd->sc_pd.pd_id;
2045 info->stag_idx = RS_64_1(ibmw->rkey, IRDMA_CQPSQ_STAG_IDX_S);
2047 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2048 cqp_info->post_sq = 1;
2049 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2050 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2051 irdma_handle_cqp_op(iwdev->rf, cqp_request);
2052 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2053 irdma_free_stag(iwdev, iwmr->stag);
2060 * irdma_hw_alloc_stag - cqp command to allocate stag
2061 * @iwdev: irdma device
2062 * @iwmr: irdma mr pointer
2065 irdma_hw_alloc_stag(struct irdma_device *iwdev,
2066 struct irdma_mr *iwmr)
2068 struct irdma_allocate_stag_info *info;
2069 struct ib_pd *pd = iwmr->ibmr.pd;
2070 struct irdma_pd *iwpd = to_iwpd(pd);
2071 struct irdma_cqp_request *cqp_request;
2072 struct cqp_cmds_info *cqp_info;
2075 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2079 cqp_info = &cqp_request->info;
2080 info = &cqp_info->in.u.alloc_stag.info;
2081 memset(info, 0, sizeof(*info));
2082 info->page_size = PAGE_SIZE;
2083 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2084 info->pd_id = iwpd->sc_pd.pd_id;
2085 info->total_len = iwmr->len;
2086 info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
2087 info->remote_access = true;
2088 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
2089 cqp_info->post_sq = 1;
2090 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
2091 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
2092 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2093 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2101 * irdma_set_page - populate pbl list for fmr
2102 * @ibmr: ib mem to access iwarp mr pointer
2103 * @addr: page dma address for pbl list
2106 irdma_set_page(struct ib_mr *ibmr, u64 addr)
2108 struct irdma_mr *iwmr = to_iwmr(ibmr);
2109 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2110 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2113 if (unlikely(iwmr->npages == iwmr->page_cnt))
2116 if (palloc->level == PBLE_LEVEL_2) {
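/* level-2 PBL: the upper bits of the page index select the leaf, the low bits select the slot within that leaf (PBLE_PER_PAGE entries per leaf) */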
2117 struct irdma_pble_info *palloc_info =
2118 palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
2120 palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
2122 pbl = palloc->level1.addr;
2123 pbl[iwmr->npages] = addr;
2131 * irdma_map_mr_sg - map an sg list for fmr
2132 * @ibmr: ib mem to access iwarp mr pointer
2133 * @sg: scatter gather list
2134 * @sg_nents: number of sg pages
2135 * @sg_offset: scatter gather list for fmr
2138 irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2139 int sg_nents, unsigned int *sg_offset)
2141 struct irdma_mr *iwmr = to_iwmr(ibmr);
2145 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
2149 * irdma_hwreg_mr - send cqp command for memory registration
2150 * @iwdev: irdma device
2151 * @iwmr: irdma mr pointer
2152 * @access: access for MR
2155 irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
2158 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2159 struct irdma_reg_ns_stag_info *stag_info;
2160 struct ib_pd *pd = iwmr->ibmr.pd;
2161 struct irdma_pd *iwpd = to_iwpd(pd);
2162 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2163 struct irdma_cqp_request *cqp_request;
2164 struct cqp_cmds_info *cqp_info;
2167 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2171 cqp_info = &cqp_request->info;
2172 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
2173 memset(stag_info, 0, sizeof(*stag_info));
2174 stag_info->va = iwpbl->user_base;
2175 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2176 stag_info->stag_key = (u8)iwmr->stag;
2177 stag_info->total_len = iwmr->len;
2178 stag_info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
2179 stag_info->access_rights = irdma_get_mr_access(access);
2180 stag_info->pd_id = iwpd->sc_pd.pd_id;
2181 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
2182 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
2184 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2185 stag_info->page_size = iwmr->page_size;
2187 if (iwpbl->pbl_allocated) {
2188 if (palloc->level == PBLE_LEVEL_1) {
2189 stag_info->first_pm_pbl_index = palloc->level1.idx;
2190 stag_info->chunk_size = 1;
2192 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
2193 stag_info->chunk_size = 3;
2196 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
2199 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
2200 cqp_info->post_sq = 1;
2201 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
2202 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
2203 ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2204 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
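/*
 * chunk_size above selects the PBL layout handed to the CQP: 1 for a level-1
 * PBL, 3 for a level-2 (root plus leaves) PBL. When no PBL was allocated it
 * stays 0 from the memset and reg_addr_pa carries the single physical buffer
 * address instead.
 */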
2213 * irdma_reg_user_mr - Register a user memory region
2215 * @start: virtual start address
2216 * @len: length of mr
2217 * @virt: virtual address
2218 * @access: access of mr
2221 static struct ib_mr *
2222 irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
2223 u64 virt, int access,
2224 struct ib_udata *udata)
2226 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
2227 struct irdma_device *iwdev = to_iwdev(pd->device);
2228 struct irdma_ucontext *ucontext;
2229 struct irdma_pble_alloc *palloc;
2230 struct irdma_pbl *iwpbl;
2231 struct irdma_mr *iwmr;
2232 struct ib_umem *region;
2233 struct irdma_mem_reg_req req = {};
2234 u32 total, stag = 0;
2235 u8 shadow_pgcnt = 1;
2236 bool use_pbles = false;
2237 unsigned long flags;
2241 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
2242 return ERR_PTR(-EINVAL);
2244 if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
2245 return ERR_PTR(-EINVAL);
2247 region = ib_umem_get(pd->uobject->context, start, len, access, 0);
2249 if (IS_ERR(region)) {
2250 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
2251 "Failed to create ib_umem region\n");
2252 return (struct ib_mr *)region;
2255 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
2256 ib_umem_release(region);
2257 return ERR_PTR(-EFAULT);
2260 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2262 ib_umem_release(region);
2263 return ERR_PTR(-ENOMEM);
2266 iwpbl = &iwmr->iwpbl;
2268 iwmr->region = region;
2270 iwmr->ibmr.device = pd->device;
2271 iwmr->ibmr.iova = virt;
2272 iwmr->page_size = IRDMA_HW_PAGE_SIZE;
2273 iwmr->page_msk = ~(IRDMA_HW_PAGE_SIZE - 1);
2275 iwmr->len = region->length;
2276 iwpbl->user_base = virt;
2277 palloc = &iwpbl->pble_alloc;
2278 iwmr->type = req.reg_type;
2279 iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size, virt);
2281 switch (req.reg_type) {
2282 case IRDMA_MEMREG_TYPE_QP:
2283 total = req.sq_pages + req.rq_pages + shadow_pgcnt;
2284 if (total > iwmr->page_cnt) {
2288 total = req.sq_pages + req.rq_pages;
2289 use_pbles = (total > 2);
2290 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2294 #if __FreeBSD_version >= 1400026
2295 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
2297 ucontext = to_ucontext(pd->uobject->context);
2299 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2300 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
2301 iwpbl->on_list = true;
2302 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2304 case IRDMA_MEMREG_TYPE_CQ:
2305 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
2307 total = req.cq_pages + shadow_pgcnt;
2308 if (total > iwmr->page_cnt) {
2313 use_pbles = (req.cq_pages > 1);
2314 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2318 #if __FreeBSD_version >= 1400026
2319 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
2321 ucontext = to_ucontext(pd->uobject->context);
2323 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2324 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
2325 iwpbl->on_list = true;
2326 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2328 case IRDMA_MEMREG_TYPE_MEM:
2329 use_pbles = (iwmr->page_cnt != 1);
2331 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
2336 ret = irdma_check_mr_contiguous(palloc,
2339 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2340 iwpbl->pbl_allocated = false;
2344 stag = irdma_create_stag(iwdev);
2351 iwmr->ibmr.rkey = stag;
2352 iwmr->ibmr.lkey = stag;
2353 iwmr->access = access;
2354 err = irdma_hwreg_mr(iwdev, iwmr, access);
2356 irdma_free_stag(iwdev, stag);
2365 iwmr->type = req.reg_type;
2370 if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
2371 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2372 ib_umem_release(region);
2375 return ERR_PTR(err);
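/*
 * Summary of the user registration paths above: IRDMA_MEMREG_TYPE_QP and
 * IRDMA_MEMREG_TYPE_CQ pin queue memory for a user QP/CQ, size-check it
 * against the pinned page count (including the shadow area), map it through
 * irdma_handle_q_mem() and track the pbl on the ucontext reg-mem lists, while
 * IRDMA_MEMREG_TYPE_MEM is an ordinary data MR that gets its own stag and is
 * registered with the hardware through irdma_hwreg_mr().
 */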
2379 irdma_hwdereg_mr(struct ib_mr *ib_mr)
2381 struct irdma_device *iwdev = to_iwdev(ib_mr->device);
2382 struct irdma_mr *iwmr = to_iwmr(ib_mr);
2383 struct irdma_pd *iwpd = to_iwpd(ib_mr->pd);
2384 struct irdma_dealloc_stag_info *info;
2385 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2386 struct irdma_cqp_request *cqp_request;
2387 struct cqp_cmds_info *cqp_info;
2391 * Skip HW MR de-register when it is already de-registered during an MR re-register and the re-registration
2394 if (!iwmr->is_hwreg)
2397 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2401 cqp_info = &cqp_request->info;
2402 info = &cqp_info->in.u.dealloc_stag.info;
2403 memset(info, 0, sizeof(*info));
2404 info->pd_id = iwpd->sc_pd.pd_id;
2405 info->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S);
2407 if (iwpbl->pbl_allocated)
2408 info->dealloc_pbl = true;
2410 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2411 cqp_info->post_sq = 1;
2412 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2413 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2414 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2415 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2424 * irdma_rereg_mr_trans - re-register a user MR for a translation change
2425 * @iwmr: ptr of iwmr  @start: virtual start address  @len: length of mr  @virt: virtual address
2427 * Re-register a user memory region when a translation change is requested,
2428 * reusing the stag from the original registration.
2431 irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
2432 u64 virt, struct ib_udata *udata)
2434 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2435 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2436 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2437 struct ib_pd *pd = iwmr->ibmr.pd;
2438 struct ib_umem *region;
2442 region = ib_umem_get(pd->uobject->context, start, len, iwmr->access, 0);
2444 if (IS_ERR(region)) {
2445 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
2446 "Failed to create ib_umem region\n");
2447 return (struct ib_mr *)region;
2450 iwmr->region = region;
2451 iwmr->ibmr.iova = virt;
2453 iwmr->page_size = PAGE_SIZE;
2455 iwmr->len = region->length;
2456 iwpbl->user_base = virt;
2457 iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size,
2460 use_pbles = (iwmr->page_cnt != 1);
2462 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
2467 err = irdma_check_mr_contiguous(palloc,
2470 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2471 iwpbl->pbl_allocated = false;
2475 err = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
2482 if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) {
2483 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2484 iwpbl->pbl_allocated = false;
2486 ib_umem_release(region);
2487 iwmr->region = NULL;
2489 return ERR_PTR(err);
2493 * irdma_reg_phys_mr - register kernel physical memory
2495 * @addr: physical address of memory to register
2496 * @size: size of memory to register
2497 * @access: Access rights
2498 * @iova_start: start of virtual address for physical buffers
2501 irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
2504 struct irdma_device *iwdev = to_iwdev(pd->device);
2505 struct irdma_pbl *iwpbl;
2506 struct irdma_mr *iwmr;
2510 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2512 return ERR_PTR(-ENOMEM);
2515 iwmr->ibmr.device = pd->device;
2516 iwpbl = &iwmr->iwpbl;
2518 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2519 iwpbl->user_base = *iova_start;
2520 stag = irdma_create_stag(iwdev);
2527 iwmr->ibmr.iova = *iova_start;
2528 iwmr->ibmr.rkey = stag;
2529 iwmr->ibmr.lkey = stag;
2531 iwmr->pgaddrmem[0] = addr;
2533 iwmr->page_size = SZ_4K;
2534 ret = irdma_hwreg_mr(iwdev, iwmr, access);
2536 irdma_free_stag(iwdev, stag);
2545 return ERR_PTR(ret);
2549 * irdma_get_dma_mr - register physical mem
2551 * @acc: access for memory
2553 static struct ib_mr *
2554 irdma_get_dma_mr(struct ib_pd *pd, int acc)
2558 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
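/*
 * The DMA MR path registers a physical MR at address 0 with zero length.
 * Reading irdma_hwreg_mr() above, unrestricted access appears to be conveyed
 * by the all_memory flag, which is only set when the PD was created with
 * IB_PD_UNSAFE_GLOBAL_RKEY.
 */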
2562 * irdma_del_memlist - delete pbl list entries for CQ/QP
2563 * @iwmr: iwmr for IB's user page addresses
2564 * @ucontext: ptr to user context
2567 irdma_del_memlist(struct irdma_mr *iwmr,
2568 struct irdma_ucontext *ucontext)
2570 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2571 unsigned long flags;
2573 switch (iwmr->type) {
2574 case IRDMA_MEMREG_TYPE_CQ:
2575 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2576 if (iwpbl->on_list) {
2577 iwpbl->on_list = false;
2578 list_del(&iwpbl->list);
2580 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2582 case IRDMA_MEMREG_TYPE_QP:
2583 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2584 if (iwpbl->on_list) {
2585 iwpbl->on_list = false;
2586 list_del(&iwpbl->list);
2588 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2596 * irdma_copy_sg_list - copy sg list for qp
2597 * @sg_list: copied into sg_list
2598 * @sgl: copy from sgl
2599 * @num_sges: count of sg entries
2602 irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
2607 for (i = 0; i < num_sges; i++) {
2608 sg_list[i].tag_off = sgl[i].addr;
2609 sg_list[i].len = sgl[i].length;
2610 sg_list[i].stag = sgl[i].lkey;
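/*
 * irdma_sge carries the same triple as ib_sge under different names:
 * tag_off is the address, len the length and stag the lkey, so the loop
 * above is a straight field-for-field translation.
 */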
2615 * irdma_post_send - kernel application wr
2616 * @ibqp: qp ptr for wr
2617 * @ib_wr: work request ptr
2618 * @bad_wr: return of bad wr if err
2621 irdma_post_send(struct ib_qp *ibqp,
2622 const struct ib_send_wr *ib_wr,
2623 const struct ib_send_wr **bad_wr)
2625 struct irdma_qp *iwqp;
2626 struct irdma_qp_uk *ukqp;
2627 struct irdma_sc_dev *dev;
2628 struct irdma_post_sq_info info;
2630 unsigned long flags;
2632 struct irdma_ah *ah;
2634 iwqp = to_iwqp(ibqp);
2635 ukqp = &iwqp->sc_qp.qp_uk;
2636 dev = &iwqp->iwdev->rf->sc_dev;
2638 spin_lock_irqsave(&iwqp->lock, flags);
2640 memset(&info, 0, sizeof(info));
2642 info.wr_id = (ib_wr->wr_id);
2643 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
2644 info.signaled = true;
2645 if (ib_wr->send_flags & IB_SEND_FENCE)
2646 info.read_fence = true;
2647 switch (ib_wr->opcode) {
2648 case IB_WR_SEND_WITH_IMM:
2649 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
2650 info.imm_data_valid = true;
2651 info.imm_data = ntohl(ib_wr->ex.imm_data);
2658 case IB_WR_SEND_WITH_INV:
2659 if (ib_wr->opcode == IB_WR_SEND ||
2660 ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
2661 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2662 info.op_type = IRDMA_OP_TYPE_SEND_SOL;
2664 info.op_type = IRDMA_OP_TYPE_SEND;
2666 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2667 info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
2669 info.op_type = IRDMA_OP_TYPE_SEND_INV;
2670 info.stag_to_inv = ib_wr->ex.invalidate_rkey;
2673 info.op.send.num_sges = ib_wr->num_sge;
2674 info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
2675 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
2676 iwqp->ibqp.qp_type == IB_QPT_GSI) {
2677 ah = to_iwah(ud_wr(ib_wr)->ah);
2678 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
2679 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
2680 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
2683 if (ib_wr->send_flags & IB_SEND_INLINE)
2684 err = irdma_uk_inline_send(ukqp, &info, false);
2686 err = irdma_uk_send(ukqp, &info, false);
2688 case IB_WR_RDMA_WRITE_WITH_IMM:
2689 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
2690 info.imm_data_valid = true;
2691 info.imm_data = ntohl(ib_wr->ex.imm_data);
2697 case IB_WR_RDMA_WRITE:
2698 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2699 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
2701 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
2703 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
2704 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
2705 info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2706 info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2707 if (ib_wr->send_flags & IB_SEND_INLINE)
2708 err = irdma_uk_inline_rdma_write(ukqp, &info, false);
2710 err = irdma_uk_rdma_write(ukqp, &info, false);
2712 case IB_WR_RDMA_READ_WITH_INV:
2715 case IB_WR_RDMA_READ:
2716 if (ib_wr->num_sge >
2717 dev->hw_attrs.uk_attrs.max_hw_read_sges) {
2721 info.op_type = IRDMA_OP_TYPE_RDMA_READ;
2722 info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2723 info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2724 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
2725 info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
2726 err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
2728 case IB_WR_LOCAL_INV:
2729 info.op_type = IRDMA_OP_TYPE_INV_STAG;
2730 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
2731 err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
2734 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
2735 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
2736 struct irdma_fast_reg_stag_info stag_info = {0};
2738 stag_info.signaled = info.signaled;
2739 stag_info.read_fence = info.read_fence;
2740 stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
2741 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
2742 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
2743 stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
2744 stag_info.wr_id = ib_wr->wr_id;
2745 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2746 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
2747 stag_info.total_len = iwmr->ibmr.length;
2748 if (palloc->level == PBLE_LEVEL_2) {
2749 stag_info.chunk_size = 3;
2750 stag_info.first_pm_pbl_index = palloc->level2.root.idx;
2752 stag_info.chunk_size = 1;
2753 stag_info.first_pm_pbl_index = palloc->level1.idx;
2755 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
2756 err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
2762 irdma_debug(iwdev_to_idev(iwqp->iwdev),
2764 "upost_send bad opcode = 0x%x\n",
2771 ib_wr = ib_wr->next;
2774 if (!iwqp->flush_issued) {
2775 if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
2776 irdma_uk_qp_post_wr(ukqp);
2777 spin_unlock_irqrestore(&iwqp->lock, flags);
2779 spin_unlock_irqrestore(&iwqp->lock, flags);
2780 irdma_sched_qp_flush_work(iwqp);
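/*
 * The SQ doorbell (irdma_uk_qp_post_wr()) is only rung while no flush has
 * been issued and the QP is at or below RTS; once a flush is pending the
 * posted WRs are left for the deferred flush worker scheduled by
 * irdma_sched_qp_flush_work() instead.
 */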
2789 * irdma_post_recv - post receive wr for kernel application
2790 * @ibqp: ib qp pointer
2791 * @ib_wr: work request for receive
2792 * @bad_wr: bad wr caused an error
2795 irdma_post_recv(struct ib_qp *ibqp,
2796 const struct ib_recv_wr *ib_wr,
2797 const struct ib_recv_wr **bad_wr)
2799 struct irdma_qp *iwqp = to_iwqp(ibqp);
2800 struct irdma_qp_uk *ukqp = &iwqp->sc_qp.qp_uk;
2801 struct irdma_post_rq_info post_recv = {0};
2802 struct irdma_sge *sg_list = iwqp->sg_list;
2803 unsigned long flags;
2806 spin_lock_irqsave(&iwqp->lock, flags);
2809 if (ib_wr->num_sge > ukqp->max_rq_frag_cnt) {
2813 post_recv.num_sges = ib_wr->num_sge;
2814 post_recv.wr_id = ib_wr->wr_id;
2815 irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
2816 post_recv.sg_list = sg_list;
2817 err = irdma_uk_post_receive(ukqp, &post_recv);
2819 irdma_debug(iwdev_to_idev(iwqp->iwdev),
2820 IRDMA_DEBUG_VERBS, "post_recv err %d\n",
2825 ib_wr = ib_wr->next;
2829 spin_unlock_irqrestore(&iwqp->lock, flags);
2830 if (iwqp->flush_issued)
2831 irdma_sched_qp_flush_work(iwqp);
2839 * irdma_flush_err_to_ib_wc_status - convert a flush error code to an IB wc status
2840 * @opcode: iwarp flush code
2842 static enum ib_wc_status
2843 irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
2846 case FLUSH_PROT_ERR:
2847 return IB_WC_LOC_PROT_ERR;
2848 case FLUSH_REM_ACCESS_ERR:
2849 return IB_WC_REM_ACCESS_ERR;
2850 case FLUSH_LOC_QP_OP_ERR:
2851 return IB_WC_LOC_QP_OP_ERR;
2852 case FLUSH_REM_OP_ERR:
2853 return IB_WC_REM_OP_ERR;
2854 case FLUSH_LOC_LEN_ERR:
2855 return IB_WC_LOC_LEN_ERR;
2856 case FLUSH_GENERAL_ERR:
2857 return IB_WC_WR_FLUSH_ERR;
2858 case FLUSH_MW_BIND_ERR:
2859 return IB_WC_MW_BIND_ERR;
2860 case FLUSH_REM_INV_REQ_ERR:
2861 return IB_WC_REM_INV_REQ_ERR;
2862 case FLUSH_RETRY_EXC_ERR:
2863 return IB_WC_RETRY_EXC_ERR;
2864 case FLUSH_FATAL_ERR:
2866 return IB_WC_FATAL_ERR;
2871 set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
2872 struct ib_wc *entry)
2874 struct irdma_sc_qp *qp;
2876 switch (cq_poll_info->op_type) {
2877 case IRDMA_OP_TYPE_RDMA_WRITE:
2878 case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
2879 entry->opcode = IB_WC_RDMA_WRITE;
2881 case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
2882 case IRDMA_OP_TYPE_RDMA_READ:
2883 entry->opcode = IB_WC_RDMA_READ;
2885 case IRDMA_OP_TYPE_SEND_SOL:
2886 case IRDMA_OP_TYPE_SEND_SOL_INV:
2887 case IRDMA_OP_TYPE_SEND_INV:
2888 case IRDMA_OP_TYPE_SEND:
2889 entry->opcode = IB_WC_SEND;
2891 case IRDMA_OP_TYPE_FAST_REG_NSMR:
2892 entry->opcode = IB_WC_REG_MR;
2894 case IRDMA_OP_TYPE_INV_STAG:
2895 entry->opcode = IB_WC_LOCAL_INV;
2898 qp = cq_poll_info->qp_handle;
2899 ibdev_err(irdma_get_ibdev(qp->dev), "Invalid opcode = %d in CQE\n",
2900 cq_poll_info->op_type);
2901 entry->status = IB_WC_GENERAL_ERR;
2906 set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
2907 struct ib_wc *entry, bool send_imm_support)
2910 * iWARP does not support sendImm, so the presence of Imm data
2913 if (!send_imm_support) {
2914 entry->opcode = cq_poll_info->imm_valid ?
2915 IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
2918 switch (cq_poll_info->op_type) {
2919 case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
2920 case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
2921 entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2924 entry->opcode = IB_WC_RECV;
2929 * irdma_process_cqe - process cqe info
2930 * @entry: processed cqe
2931 * @cq_poll_info: cqe info
2934 irdma_process_cqe(struct ib_wc *entry,
2935 struct irdma_cq_poll_info *cq_poll_info)
2937 struct irdma_sc_qp *qp;
2939 entry->wc_flags = 0;
2940 entry->pkey_index = 0;
2941 entry->wr_id = cq_poll_info->wr_id;
2943 qp = cq_poll_info->qp_handle;
2944 entry->qp = qp->qp_uk.back_qp;
2946 if (cq_poll_info->error) {
2947 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
2948 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
2950 entry->vendor_err = cq_poll_info->major_err << 16 |
2951 cq_poll_info->minor_err;
2953 entry->status = IB_WC_SUCCESS;
2954 if (cq_poll_info->imm_valid) {
2955 entry->ex.imm_data = htonl(cq_poll_info->imm_data);
2956 entry->wc_flags |= IB_WC_WITH_IMM;
2958 if (cq_poll_info->ud_smac_valid) {
2959 ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
2960 entry->wc_flags |= IB_WC_WITH_SMAC;
2963 if (cq_poll_info->ud_vlan_valid) {
2964 u16 vlan = cq_poll_info->ud_vlan & EVL_VLID_MASK;
2966 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
2968 entry->vlan_id = vlan;
2969 entry->wc_flags |= IB_WC_WITH_VLAN;
2976 if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
2977 set_ib_wc_op_sq(cq_poll_info, entry);
2979 set_ib_wc_op_rq(cq_poll_info, entry,
2980 qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
2982 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
2983 cq_poll_info->stag_invalid_set) {
2984 entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
2985 entry->wc_flags |= IB_WC_WITH_INVALIDATE;
2989 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
2990 entry->src_qp = cq_poll_info->ud_src_qpn;
2993 (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
2994 entry->network_hdr_type = cq_poll_info->ipv4 ?
2998 entry->src_qp = cq_poll_info->qp_id;
3001 entry->byte_len = cq_poll_info->bytes_xfered;
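/*
 * For RoCE UD completions the source QP comes from the datagram header and
 * the GRH / network-header-type flags are set, while connected QPs report
 * the poll info qp_id; when a VLAN tag is present its PCP bits are folded
 * into entry->sl and the VLAN id is reported with IB_WC_WITH_VLAN.
 */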
3005 * irdma_poll_one - poll one entry of the CQ
3006 * @ukcq: ukcq to poll
3007 * @cur_cqe: current CQE info to be filled in
3008 * @entry: ib_wc object to be filled for non-extended CQ or NULL for extended CQ
3010 * Returns the internal irdma device error code or 0 on success
3013 irdma_poll_one(struct irdma_cq_uk *ukcq,
3014 struct irdma_cq_poll_info *cur_cqe,
3015 struct ib_wc *entry)
3017 int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
3022 irdma_process_cqe(entry, cur_cqe);
3028 * __irdma_poll_cq - poll cq for completion (kernel apps)
3030 * @num_entries: number of entries to poll
3031 * @entry: wr of a completed entry
3034 __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
3036 struct list_head *tmp_node, *list_node;
3037 struct irdma_cq_buf *last_buf = NULL;
3038 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
3039 struct irdma_cq_buf *cq_buf;
3041 struct irdma_device *iwdev;
3042 struct irdma_cq_uk *ukcq;
3043 bool cq_new_cqe = false;
3044 int resized_bufs = 0;
3047 iwdev = to_iwdev(iwcq->ibcq.device);
3048 ukcq = &iwcq->sc_cq.cq_uk;
3050 /* go through the list of previously resized CQ buffers */
3051 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
3052 cq_buf = container_of(list_node, struct irdma_cq_buf, list);
3053 while (npolled < num_entries) {
3054 ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
3062 /* QP using the CQ is destroyed. Skip reporting this CQE */
3063 if (ret == -EFAULT) {
3070 /* save the resized CQ buffer which received the last cqe */
3076 /* check the current CQ for new cqes */
3077 while (npolled < num_entries) {
3078 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
3079 if (ret == -ENOENT) {
3080 ret = irdma_generated_cmpls(iwcq, cur_cqe);
3082 irdma_process_cqe(entry + npolled, cur_cqe);
3092 /* QP using the CQ is destroyed. Skip reporting this CQE */
3093 if (ret == -EFAULT) {
3101 /* all previous CQ resizes are complete */
3102 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
3104 /* only CQ resizes up to the last_buf are complete */
3105 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
3107 /* report to the HW the number of complete CQ resizes */
3108 irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
3112 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3113 "%s: Error polling CQ, irdma_err: %d\n", __func__, ret);
3119 * irdma_poll_cq - poll cq for completion (kernel apps)
3121 * @num_entries: number of entries to poll
3122 * @entry: wr of a completed entry
3125 irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
3126 struct ib_wc *entry)
3128 struct irdma_cq *iwcq;
3129 unsigned long flags;
3132 iwcq = to_iwcq(ibcq);
3134 spin_lock_irqsave(&iwcq->lock, flags);
3135 ret = __irdma_poll_cq(iwcq, num_entries, entry);
3136 spin_unlock_irqrestore(&iwcq->lock, flags);
3142 * irdma_req_notify_cq - arm cq kernel application
3144 * @notify_flags: notification flags
3147 irdma_req_notify_cq(struct ib_cq *ibcq,
3148 enum ib_cq_notify_flags notify_flags)
3150 struct irdma_cq *iwcq;
3151 struct irdma_cq_uk *ukcq;
3152 unsigned long flags;
3153 enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
3154 bool promo_event = false;
3157 iwcq = to_iwcq(ibcq);
3158 ukcq = &iwcq->sc_cq.cq_uk;
3160 spin_lock_irqsave(&iwcq->lock, flags);
3161 if (notify_flags == IB_CQ_SOLICITED) {
3162 cq_notify = IRDMA_CQ_COMPL_SOLICITED;
3164 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED)
3168 if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
3169 iwcq->last_notify = cq_notify;
3170 irdma_uk_cq_request_notification(ukcq, cq_notify);
3173 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3174 (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
3176 spin_unlock_irqrestore(&iwcq->lock, flags);
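/*
 * The atomic 'armed' flag keeps the CQ from being armed twice; the one
 * exception is a promotion from a solicited-only arm to an any-completion
 * arm (promo_event). When IB_CQ_REPORT_MISSED_EVENTS is requested and the
 * CQ already holds entries or software-generated completions, a positive
 * return tells the caller to poll again.
 */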
3182 * mcast_list_add - Add a new mcast item to list
3183 * @rf: RDMA PCI function
3184 * @new_elem: pointer to element to add
3187 mcast_list_add(struct irdma_pci_f *rf,
3188 struct mc_table_list *new_elem)
3190 list_add(&new_elem->list, &rf->mc_qht_list.list);
3194 * mcast_list_del - Remove an mcast item from list
3195 * @mc_qht_elem: pointer to mcast table list element
3198 mcast_list_del(struct mc_table_list *mc_qht_elem)
3201 list_del(&mc_qht_elem->list);
3205 * mcast_list_lookup_ip - Search mcast list for address
3206 * @rf: RDMA PCI function
3207 * @ip_mcast: pointer to mcast IP address
3209 static struct mc_table_list *
3210 mcast_list_lookup_ip(struct irdma_pci_f *rf,
3213 struct mc_table_list *mc_qht_el;
3214 struct list_head *pos, *q;
3216 list_for_each_safe(pos, q, &rf->mc_qht_list.list) {
3217 mc_qht_el = list_entry(pos, struct mc_table_list, list);
3218 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
3219 sizeof(mc_qht_el->mc_info.dest_ip)))
3227 * irdma_mcast_cqp_op - perform a mcast cqp operation
3228 * @iwdev: irdma device
3229 * @mc_grp_ctx: mcast group info
3232 * returns error status
3235 irdma_mcast_cqp_op(struct irdma_device *iwdev,
3236 struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
3238 struct cqp_cmds_info *cqp_info;
3239 struct irdma_cqp_request *cqp_request;
3242 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3246 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
3247 cqp_info = &cqp_request->info;
3248 cqp_info->cqp_cmd = op;
3249 cqp_info->post_sq = 1;
3250 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
3251 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
3252 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3253 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3259 * irdma_attach_mcast - attach a qp to a multicast group
3261 * @ibgid: pointer to global ID
3264 * returns error status
3267 irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
3269 struct irdma_qp *iwqp = to_iwqp(ibqp);
3270 struct irdma_device *iwdev = iwqp->iwdev;
3271 struct irdma_pci_f *rf = iwdev->rf;
3272 struct mc_table_list *mc_qht_elem;
3273 struct irdma_mcast_grp_ctx_entry_info mcg_info = {0};
3274 unsigned long flags;
3275 u32 ip_addr[4] = {0};
3282 struct sockaddr saddr;
3283 struct sockaddr_in saddr_in;
3284 struct sockaddr_in6 saddr_in6;
3286 unsigned char dmac[ETH_ALEN];
3288 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
3290 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
3291 irdma_copy_ip_ntohl(ip_addr,
3292 sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
3293 irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
3295 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3296 "qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
3298 irdma_mcast_mac_v6(ip_addr, dmac);
3300 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
3302 vlan_id = irdma_get_vlan_ipv4(ip_addr);
3303 irdma_mcast_mac_v4(ip_addr, dmac);
3304 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3305 "qp_id=%d, IP4address=%pI4, MAC=%pM\n",
3306 ibqp->qp_num, ip_addr, dmac);
3309 spin_lock_irqsave(&rf->qh_list_lock, flags);
3310 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
3312 struct irdma_dma_mem *dma_mem_mc;
3314 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3315 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
3319 mc_qht_elem->mc_info.ipv4_valid = ipv4;
3320 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
3321 sizeof(mc_qht_elem->mc_info.dest_ip));
3322 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
3323 &mgn, &rf->next_mcg);
3329 mc_qht_elem->mc_info.mgn = mgn;
3330 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
3331 dma_mem_mc->size = sizeof(u64) * IRDMA_MAX_MGS_PER_CTX;
3332 dma_mem_mc->va = irdma_allocate_dma_mem(&rf->hw, dma_mem_mc,
3334 IRDMA_HW_PAGE_SIZE);
3335 if (!dma_mem_mc->va) {
3336 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
3341 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
3342 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
3343 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
3344 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
3345 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
3346 if (vlan_id < VLAN_N_VID)
3347 mc_qht_elem->mc_grp_ctx.vlan_valid = true;
3348 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
3349 mc_qht_elem->mc_grp_ctx.qs_handle =
3350 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
3351 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
3353 spin_lock_irqsave(&rf->qh_list_lock, flags);
3354 mcast_list_add(rf, mc_qht_elem);
3356 if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
3357 IRDMA_MAX_MGS_PER_CTX) {
3358 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3363 mcg_info.qp_id = iwqp->ibqp.qp_num;
3364 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
3365 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
3366 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3368 /* Only if there is a change do we need to modify or create */
3370 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3371 IRDMA_OP_MC_CREATE);
3372 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
3373 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3374 IRDMA_OP_MC_MODIFY);
3385 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
3386 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
3387 mcast_list_del(mc_qht_elem);
3388 irdma_free_dma_mem(&rf->hw,
3389 &mc_qht_elem->mc_grp_ctx.dma_mem_mc);
3390 irdma_free_rsrc(rf, rf->allocated_mcgs,
3391 mc_qht_elem->mc_grp_ctx.mg_id);
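/*
 * Multicast groups are tracked per destination IP in mc_qht_list: the first
 * attach for an address allocates a group context and issues
 * IRDMA_OP_MC_CREATE, later attaches to the same address only add the QP and
 * issue IRDMA_OP_MC_MODIFY, and a single context holds at most
 * IRDMA_MAX_MGS_PER_CTX attached QPs.
 */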
3399 * irdma_detach_mcast - detach a qp from a multicast group
3401 * @ibgid: pointer to global ID
3404 * returns error status
3407 irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
3409 struct irdma_qp *iwqp = to_iwqp(ibqp);
3410 struct irdma_device *iwdev = iwqp->iwdev;
3411 struct irdma_pci_f *rf = iwdev->rf;
3412 u32 ip_addr[4] = {0};
3413 struct mc_table_list *mc_qht_elem;
3414 struct irdma_mcast_grp_ctx_entry_info mcg_info = {0};
3416 unsigned long flags;
3418 struct sockaddr saddr;
3419 struct sockaddr_in saddr_in;
3420 struct sockaddr_in6 saddr_in6;
3423 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
3424 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
3425 irdma_copy_ip_ntohl(ip_addr,
3426 sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
3428 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
3430 spin_lock_irqsave(&rf->qh_list_lock, flags);
3431 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
3433 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3434 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3435 "address not found MCG\n");
3439 mcg_info.qp_id = iwqp->ibqp.qp_num;
3440 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
3441 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
3442 mcast_list_del(mc_qht_elem);
3443 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3444 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3445 IRDMA_OP_MC_DESTROY);
3447 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3448 "failed MC_DESTROY MCG\n");
3449 spin_lock_irqsave(&rf->qh_list_lock, flags);
3450 mcast_list_add(rf, mc_qht_elem);
3451 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3455 irdma_free_dma_mem(&rf->hw,
3456 &mc_qht_elem->mc_grp_ctx.dma_mem_mc);
3457 irdma_free_rsrc(rf, rf->allocated_mcgs,
3458 mc_qht_elem->mc_grp_ctx.mg_id);
3461 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3462 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3463 IRDMA_OP_MC_MODIFY);
3465 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3466 "failed Modify MCG\n");
3475 * irdma_query_ah - Query address handle
3476 * @ibah: pointer to address handle
3477 * @ah_attr: address handle attributes
3480 irdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
3482 struct irdma_ah *ah = to_iwah(ibah);
3484 memset(ah_attr, 0, sizeof(*ah_attr));
3485 if (ah->av.attrs.ah_flags & IB_AH_GRH) {
3486 ah_attr->ah_flags = IB_AH_GRH;
3487 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
3488 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
3489 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
3490 ah_attr->grh.sgid_index = ah->sgid_index;
3492 memcpy(&ah_attr->grh.dgid, &ah->dgid,
3493 sizeof(ah_attr->grh.dgid));
3499 static __be64 irdma_mac_to_guid(struct ifnet *ndev){
3500 const unsigned char *mac = IF_LLADDR(ndev);
3502 unsigned char *dst = (unsigned char *)&guid;
3504 dst[0] = mac[0] ^ 2;
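/*
 * The node GUID is built from the port MAC in the usual modified EUI-64
 * fashion: the universal/local bit of the first octet is flipped
 * (mac[0] ^ 2) and, per the standard layout, 0xff/0xfe are inserted between
 * the upper and lower halves of the MAC to form the 8-byte GUID.
 */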
3516 static struct ifnet *
3517 irdma_get_netdev(struct ib_device *ibdev, u8 port_num)
3519 struct irdma_device *iwdev = to_iwdev(ibdev);
3521 if (iwdev->netdev) {
3522 dev_hold(iwdev->netdev);
3523 return iwdev->netdev;
3530 irdma_set_device_ops(struct ib_device *ibdev)
3532 struct ib_device *dev_ops = ibdev;
3534 #if __FreeBSD_version >= 1400000
3535 dev_ops->ops.driver_id = RDMA_DRIVER_I40IW;
3536 dev_ops->ops.size_ib_ah = IRDMA_SET_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah);
3537 dev_ops->ops.size_ib_cq = IRDMA_SET_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq);
3538 dev_ops->ops.size_ib_pd = IRDMA_SET_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd);
3539 dev_ops->ops.size_ib_ucontext = IRDMA_SET_RDMA_OBJ_SIZE(ib_ucontext,
3543 #endif /* __FreeBSD_version >= 1400000 */
3544 dev_ops->alloc_hw_stats = irdma_alloc_hw_stats;
3545 dev_ops->alloc_mr = irdma_alloc_mr;
3546 dev_ops->alloc_mw = irdma_alloc_mw;
3547 dev_ops->alloc_pd = irdma_alloc_pd;
3548 dev_ops->alloc_ucontext = irdma_alloc_ucontext;
3549 dev_ops->create_cq = irdma_create_cq;
3550 dev_ops->create_qp = irdma_create_qp;
3551 dev_ops->dealloc_mw = irdma_dealloc_mw;
3552 dev_ops->dealloc_pd = irdma_dealloc_pd;
3553 dev_ops->dealloc_ucontext = irdma_dealloc_ucontext;
3554 dev_ops->dereg_mr = irdma_dereg_mr;
3555 dev_ops->destroy_cq = irdma_destroy_cq;
3556 dev_ops->destroy_qp = irdma_destroy_qp;
3557 dev_ops->disassociate_ucontext = irdma_disassociate_ucontext;
3558 dev_ops->get_dev_fw_str = irdma_get_dev_fw_str;
3559 dev_ops->get_dma_mr = irdma_get_dma_mr;
3560 dev_ops->get_hw_stats = irdma_get_hw_stats;
3561 dev_ops->get_netdev = irdma_get_netdev;
3562 dev_ops->map_mr_sg = irdma_map_mr_sg;
3563 dev_ops->mmap = irdma_mmap;
3564 #if __FreeBSD_version >= 1400026
3565 dev_ops->mmap_free = irdma_mmap_free;
3567 dev_ops->poll_cq = irdma_poll_cq;
3568 dev_ops->post_recv = irdma_post_recv;
3569 dev_ops->post_send = irdma_post_send;
3570 dev_ops->query_device = irdma_query_device;
3571 dev_ops->query_port = irdma_query_port;
3572 dev_ops->modify_port = irdma_modify_port;
3573 dev_ops->query_qp = irdma_query_qp;
3574 dev_ops->reg_user_mr = irdma_reg_user_mr;
3575 dev_ops->rereg_user_mr = irdma_rereg_user_mr;
3576 dev_ops->req_notify_cq = irdma_req_notify_cq;
3577 dev_ops->resize_cq = irdma_resize_cq;
3581 irdma_set_device_mcast_ops(struct ib_device *ibdev)
3583 struct ib_device *dev_ops = ibdev;
3584 dev_ops->attach_mcast = irdma_attach_mcast;
3585 dev_ops->detach_mcast = irdma_detach_mcast;
3589 irdma_set_device_roce_ops(struct ib_device *ibdev)
3591 struct ib_device *dev_ops = ibdev;
3592 dev_ops->create_ah = irdma_create_ah;
3593 dev_ops->destroy_ah = irdma_destroy_ah;
3594 dev_ops->get_link_layer = irdma_get_link_layer;
3595 dev_ops->get_port_immutable = irdma_roce_port_immutable;
3596 dev_ops->modify_qp = irdma_modify_qp_roce;
3597 dev_ops->query_ah = irdma_query_ah;
3598 dev_ops->query_gid = irdma_query_gid_roce;
3599 dev_ops->query_pkey = irdma_query_pkey;
3600 ibdev->add_gid = irdma_add_gid;
3601 ibdev->del_gid = irdma_del_gid;
3605 irdma_set_device_iw_ops(struct ib_device *ibdev)
3607 struct ib_device *dev_ops = ibdev;
3609 ibdev->uverbs_cmd_mask |=
3610 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
3611 (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
3613 dev_ops->create_ah = irdma_create_ah_stub;
3614 dev_ops->destroy_ah = irdma_destroy_ah_stub;
3615 dev_ops->get_port_immutable = irdma_iw_port_immutable;
3616 dev_ops->modify_qp = irdma_modify_qp;
3617 dev_ops->query_gid = irdma_query_gid;
3618 dev_ops->query_pkey = irdma_iw_query_pkey;
3622 irdma_set_device_gen1_ops(struct ib_device *ibdev)
3627 * irdma_init_roce_device - initialization of roce rdma device
3628 * @iwdev: irdma device
3631 irdma_init_roce_device(struct irdma_device *iwdev)
3633 kc_set_roce_uverbs_cmd_mask(iwdev);
3634 iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
3635 iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
3636 irdma_set_device_roce_ops(&iwdev->ibdev);
3637 if (iwdev->rf->rdma_ver == IRDMA_GEN_2)
3638 irdma_set_device_mcast_ops(&iwdev->ibdev);
3642 * irdma_init_iw_device - initialization of iwarp rdma device
3643 * @iwdev: irdma device
3646 irdma_init_iw_device(struct irdma_device *iwdev)
3648 struct ifnet *netdev = iwdev->netdev;
3650 iwdev->ibdev.node_type = RDMA_NODE_RNIC;
3651 ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, IF_LLADDR(netdev));
3652 iwdev->ibdev.iwcm = kzalloc(sizeof(*iwdev->ibdev.iwcm), GFP_KERNEL);
3653 if (!iwdev->ibdev.iwcm)
3656 iwdev->ibdev.iwcm->add_ref = irdma_qp_add_ref;
3657 iwdev->ibdev.iwcm->rem_ref = irdma_qp_rem_ref;
3658 iwdev->ibdev.iwcm->get_qp = irdma_get_qp;
3659 iwdev->ibdev.iwcm->connect = irdma_connect;
3660 iwdev->ibdev.iwcm->accept = irdma_accept;
3661 iwdev->ibdev.iwcm->reject = irdma_reject;
3662 iwdev->ibdev.iwcm->create_listen = irdma_create_listen;
3663 iwdev->ibdev.iwcm->destroy_listen = irdma_destroy_listen;
3664 memcpy(iwdev->ibdev.iwcm->ifname, if_name(netdev),
3665 sizeof(iwdev->ibdev.iwcm->ifname));
3666 irdma_set_device_iw_ops(&iwdev->ibdev);
3672 * irdma_init_rdma_device - initialization of rdma device
3673 * @iwdev: irdma device
3676 irdma_init_rdma_device(struct irdma_device *iwdev)
3678 struct pci_dev *pcidev = iwdev->rf->pcidev;
3681 iwdev->ibdev.owner = THIS_MODULE;
3682 iwdev->ibdev.uverbs_abi_ver = IRDMA_ABI_VER;
3683 kc_set_rdma_uverbs_cmd_mask(iwdev);
3685 if (iwdev->roce_mode) {
3686 irdma_init_roce_device(iwdev);
3688 ret = irdma_init_iw_device(iwdev);
3693 iwdev->ibdev.phys_port_cnt = 1;
3694 iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
3695 iwdev->ibdev.dev.parent = iwdev->rf->dev_ctx.dev;
3696 set_ibdev_dma_device(iwdev->ibdev, &pcidev->dev);
3697 irdma_set_device_ops(&iwdev->ibdev);
3698 if (iwdev->rf->rdma_ver == IRDMA_GEN_1)
3699 irdma_set_device_gen1_ops(&iwdev->ibdev);
3705 * irdma_port_ibevent - indicate port event
3706 * @iwdev: irdma device
3709 irdma_port_ibevent(struct irdma_device *iwdev)
3711 struct ib_event event;
3713 event.device = &iwdev->ibdev;
3714 event.element.port_num = 1;
3716 iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3717 ib_dispatch_event(&event);
3721 * irdma_ib_unregister_device - unregister rdma device from IB
3723 * @iwdev: irdma device
3726 irdma_ib_unregister_device(struct irdma_device *iwdev)
3728 iwdev->iw_status = 0;
3729 irdma_port_ibevent(iwdev);
3730 ib_unregister_device(&iwdev->ibdev);
3731 dev_put(iwdev->netdev);
3732 kfree(iwdev->ibdev.iwcm);
3733 iwdev->ibdev.iwcm = NULL;
3737 * irdma_ib_register_device - register irdma device to IB core
3738 * @iwdev: irdma device
3741 irdma_ib_register_device(struct irdma_device *iwdev)
3745 ret = irdma_init_rdma_device(iwdev);
3749 dev_hold(iwdev->netdev);
3750 sprintf(iwdev->ibdev.name, "irdma-%s", if_name(iwdev->netdev));
3751 ret = ib_register_device(&iwdev->ibdev, NULL);
3755 iwdev->iw_status = 1;
3756 irdma_port_ibevent(iwdev);
3761 kfree(iwdev->ibdev.iwcm);
3762 iwdev->ibdev.iwcm = NULL;
3763 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "Register RDMA device fail\n");