2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
4 * Copyright (c) 2015 - 2022 Intel Corporation
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenFabrics.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include "irdma_main.h"
39 * irdma_query_device - get device attributes
40 * @ibdev: device pointer from stack
41 * @props: returning device attributes
45 irdma_query_device(struct ib_device *ibdev,
46 struct ib_device_attr *props,
47 struct ib_udata *udata)
49 struct irdma_device *iwdev = to_iwdev(ibdev);
50 struct irdma_pci_f *rf = iwdev->rf;
51 struct pci_dev *pcidev = iwdev->rf->pcidev;
52 struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
54 if (udata->inlen || udata->outlen)
57 memset(props, 0, sizeof(*props));
58 addrconf_addr_eui48((u8 *)&props->sys_image_guid,
59 if_getlladdr(iwdev->netdev));
60 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
61 irdma_fw_minor_ver(&rf->sc_dev);
62 props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
63 IB_DEVICE_MEM_MGT_EXTENSIONS;
64 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
65 props->vendor_id = pcidev->vendor;
66 props->vendor_part_id = pcidev->device;
67 props->hw_ver = pcidev->revision;
68 props->page_size_cap = hw_attrs->page_size_cap;
69 props->max_mr_size = hw_attrs->max_mr_size;
70 props->max_qp = rf->max_qp - rf->used_qps;
71 props->max_qp_wr = hw_attrs->max_qp_wr;
72 set_max_sge(props, rf);
73 props->max_cq = rf->max_cq - rf->used_cqs;
74 props->max_cqe = rf->max_cqe - 1;
75 props->max_mr = rf->max_mr - rf->used_mrs;
76 props->max_mw = props->max_mr;
77 props->max_pd = rf->max_pd - rf->used_pds;
78 props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
79 props->max_qp_rd_atom = hw_attrs->max_hw_ird;
80 props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
81 if (rdma_protocol_roce(ibdev, 1)) {
82 props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
83 props->max_pkeys = IRDMA_PKEY_TBL_SZ;
84 props->max_ah = rf->max_ah;
85 if (hw_attrs->uk_attrs.hw_rev == IRDMA_GEN_2) {
86 props->max_mcast_grp = rf->max_mcg;
87 props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
88 props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
91 props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
92 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
93 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
99 irdma_mmap_legacy(struct irdma_ucontext *ucontext,
100 struct vm_area_struct *vma)
104 if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
107 vma->vm_private_data = ucontext;
108 pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
109 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
111 #if __FreeBSD_version >= 1400026
112 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
113 pgprot_noncached(vma->vm_page_prot), NULL);
115 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
116 pgprot_noncached(vma->vm_page_prot));
120 #if __FreeBSD_version >= 1400026
122 irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
124 struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
129 struct rdma_user_mmap_entry *
130 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
131 enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
133 struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
139 entry->bar_offset = bar_offset;
140 entry->mmap_flag = mmap_flag;
142 ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
143 &entry->rdma_entry, PAGE_SIZE);
148 *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
150 return &entry->rdma_entry;
155 find_key_in_mmap_tbl(struct irdma_ucontext *ucontext, u64 key)
157 struct irdma_user_mmap_entry *entry;
159 HASH_FOR_EACH_POSSIBLE(ucontext->mmap_hash_tbl, entry, hlist, key) {
160 if (entry->pgoff_key == key)
167 struct irdma_user_mmap_entry *
168 irdma_user_mmap_entry_add_hash(struct irdma_ucontext *ucontext, u64 bar_offset,
169 enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
171 struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
178 entry->bar_offset = bar_offset;
179 entry->mmap_flag = mmap_flag;
180 entry->ucontext = ucontext;
182 get_random_bytes(&entry->pgoff_key, sizeof(entry->pgoff_key));
184 /* The key is a page offset */
185 entry->pgoff_key >>= PAGE_SHIFT;
187 /* In the event of a collision in the hash table, retry a new key */
188 spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
189 if (!find_key_in_mmap_tbl(ucontext, entry->pgoff_key)) {
190 HASH_ADD(ucontext->mmap_hash_tbl, &entry->hlist, entry->pgoff_key);
191 spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
194 spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
195 } while (retry_cnt++ < 10);
197 irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
198 "mmap table add failed: Cannot find a unique key\n");
203 /* libc mmap uses a byte offset */
204 *mmap_offset = entry->pgoff_key << PAGE_SHIFT;
209 static struct irdma_user_mmap_entry *
210 irdma_find_user_mmap_entry(struct irdma_ucontext *ucontext,
211 struct vm_area_struct *vma)
213 struct irdma_user_mmap_entry *entry;
216 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
219 spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
220 HASH_FOR_EACH_POSSIBLE(ucontext->mmap_hash_tbl, entry, hlist, vma->vm_pgoff) {
221 if (entry->pgoff_key == vma->vm_pgoff) {
222 spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
227 spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
233 irdma_user_mmap_entry_del_hash(struct irdma_user_mmap_entry *entry)
235 struct irdma_ucontext *ucontext;
241 ucontext = entry->ucontext;
243 spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
244 HASH_DEL(ucontext->mmap_hash_tbl, &entry->hlist);
245 spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
252 * irdma_mmap - user memory map
253 * @context: context created during alloc
254 * @vma: kernel info for user memory map
257 irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
259 #if __FreeBSD_version >= 1400026
260 struct rdma_user_mmap_entry *rdma_entry;
262 struct irdma_user_mmap_entry *entry;
263 struct irdma_ucontext *ucontext;
267 ucontext = to_ucontext(context);
269 /* Legacy support for libi40iw with hard-coded mmap key */
270 if (ucontext->legacy_mode)
271 return irdma_mmap_legacy(ucontext, vma);
273 #if __FreeBSD_version >= 1400026
274 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
276 irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
277 "pgoff[0x%lx] does not have valid entry\n",
282 entry = to_irdma_mmap_entry(rdma_entry);
284 entry = irdma_find_user_mmap_entry(ucontext, vma);
286 irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
287 "pgoff[0x%lx] does not have valid entry\n",
292 irdma_debug(&ucontext->iwdev->rf->sc_dev,
293 IRDMA_DEBUG_VERBS, "bar_offset [0x%lx] mmap_flag [%d]\n",
294 entry->bar_offset, entry->mmap_flag);
296 pfn = (entry->bar_offset +
297 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
299 switch (entry->mmap_flag) {
300 case IRDMA_MMAP_IO_NC:
301 #if __FreeBSD_version >= 1400026
302 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
303 pgprot_noncached(vma->vm_page_prot),
306 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
307 pgprot_noncached(vma->vm_page_prot));
310 case IRDMA_MMAP_IO_WC:
311 #if __FreeBSD_version >= 1400026
312 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
313 pgprot_writecombine(vma->vm_page_prot),
316 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
317 pgprot_writecombine(vma->vm_page_prot));
325 irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
326 "bar_offset [0x%lx] mmap_flag[%d] err[%d]\n",
327 entry->bar_offset, entry->mmap_flag, ret);
328 #if __FreeBSD_version >= 1400026
329 rdma_user_mmap_entry_put(rdma_entry);
336 * irdma_alloc_push_page - allocate a push page for qp
340 irdma_alloc_push_page(struct irdma_qp *iwqp)
342 struct irdma_cqp_request *cqp_request;
343 struct cqp_cmds_info *cqp_info;
344 struct irdma_device *iwdev = iwqp->iwdev;
345 struct irdma_sc_qp *qp = &iwqp->sc_qp;
348 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
352 cqp_info = &cqp_request->info;
353 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
354 cqp_info->post_sq = 1;
355 cqp_info->in.u.manage_push_page.info.push_idx = 0;
356 cqp_info->in.u.manage_push_page.info.qs_handle =
357 qp->vsi->qos[qp->user_pri].qs_handle;
358 cqp_info->in.u.manage_push_page.info.free_page = 0;
359 cqp_info->in.u.manage_push_page.info.push_page_type = 0;
360 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
361 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
363 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
364 if (!status && cqp_request->compl_info.op_ret_val <
365 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
366 qp->push_idx = cqp_request->compl_info.op_ret_val;
370 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
374 * irdma_get_pbl - Retrieve pbl from a list given a virtual
376 * @va: user virtual address
377 * @pbl_list: pbl list to search in (QP's or CQ's)
380 irdma_get_pbl(unsigned long va,
381 struct list_head *pbl_list)
383 struct irdma_pbl *iwpbl;
385 list_for_each_entry(iwpbl, pbl_list, list) {
386 if (iwpbl->user_base == va) {
387 list_del(&iwpbl->list);
388 iwpbl->on_list = false;
397 * irdma_clean_cqes - clean cq entries for qp
398 * @iwqp: qp ptr (user or kernel)
402 irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
404 struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
407 spin_lock_irqsave(&iwcq->lock, flags);
408 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
409 spin_unlock_irqrestore(&iwcq->lock, flags);
static u64
irdma_compute_push_wqe_offset(struct irdma_device *iwdev, u32 page_idx)
{
413 u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
415 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
416 /* skip over db page */
417 bar_off += IRDMA_HW_PAGE_SIZE;
418 /* skip over reserved space */
419 bar_off += IRDMA_PF_BAR_RSVD;
423 bar_off += (u64)page_idx * IRDMA_HW_PAGE_SIZE;
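	/*
	 * The offset computed here points at the QP's push WQE page; the
	 * matching push doorbell page sits one HW page further, which is how
	 * irdma_setup_push_mmap_entries() derives its second mmap entry.
	 */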
429 irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
431 if (iwqp->push_db_mmap_entry) {
432 #if __FreeBSD_version >= 1400026
433 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
435 irdma_user_mmap_entry_del_hash(iwqp->push_db_mmap_entry);
437 iwqp->push_db_mmap_entry = NULL;
439 if (iwqp->push_wqe_mmap_entry) {
440 #if __FreeBSD_version >= 1400026
441 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
443 irdma_user_mmap_entry_del_hash(iwqp->push_wqe_mmap_entry);
445 iwqp->push_wqe_mmap_entry = NULL;
450 irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
451 struct irdma_qp *iwqp,
452 u64 *push_wqe_mmap_key,
453 u64 *push_db_mmap_key)
455 struct irdma_device *iwdev = ucontext->iwdev;
458 WARN_ON_ONCE(iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_2);
460 bar_off = irdma_compute_push_wqe_offset(iwdev, iwqp->sc_qp.push_idx);
462 #if __FreeBSD_version >= 1400026
463 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
464 bar_off, IRDMA_MMAP_IO_WC,
467 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
471 if (!iwqp->push_wqe_mmap_entry)
474 /* push doorbell page */
475 bar_off += IRDMA_HW_PAGE_SIZE;
476 #if __FreeBSD_version >= 1400026
477 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
478 bar_off, IRDMA_MMAP_IO_NC,
482 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
486 if (!iwqp->push_db_mmap_entry) {
487 #if __FreeBSD_version >= 1400026
488 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
490 irdma_user_mmap_entry_del_hash(iwqp->push_wqe_mmap_entry);
499 * irdma_setup_virt_qp - setup for allocation of virtual qp
500 * @iwdev: irdma device
502 * @init_info: initialize info to return
505 irdma_setup_virt_qp(struct irdma_device *iwdev,
506 struct irdma_qp *iwqp,
507 struct irdma_qp_init_info *init_info)
509 struct irdma_pbl *iwpbl = iwqp->iwpbl;
510 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
512 iwqp->page = qpmr->sq_page;
513 init_info->shadow_area_pa = qpmr->shadow;
514 if (iwpbl->pbl_allocated) {
515 init_info->virtual_map = true;
516 init_info->sq_pa = qpmr->sq_pbl.idx;
517 init_info->rq_pa = qpmr->rq_pbl.idx;
519 init_info->sq_pa = qpmr->sq_pbl.addr;
520 init_info->rq_pa = qpmr->rq_pbl.addr;
525 * irdma_setup_umode_qp - setup sq and rq size in user mode qp
527 * @iwdev: iwarp device
528 * @iwqp: qp ptr (user or kernel)
529 * @info: initialize info to return
530 * @init_attr: Initial QP create attributes
533 irdma_setup_umode_qp(struct ib_udata *udata,
534 struct irdma_device *iwdev,
535 struct irdma_qp *iwqp,
536 struct irdma_qp_init_info *info,
537 struct ib_qp_init_attr *init_attr)
539 #if __FreeBSD_version >= 1400026
540 struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
542 struct irdma_ucontext *ucontext = to_ucontext(iwqp->iwpd->ibpd.uobject->context);
544 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
545 struct irdma_create_qp_req req = {0};
549 ret = ib_copy_from_udata(&req, udata,
550 min(sizeof(req), udata->inlen));
552 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
553 "ib_copy_from_data fail\n");
557 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
559 if (req.user_wqe_bufs) {
560 info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
561 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
562 iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
563 &ucontext->qp_reg_mem_list);
564 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
568 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
574 if (!ucontext->use_raw_attrs) {
576 * Maintain backward compat with older ABI which passes sq and
577 * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
578 * There is no way to compute the correct value of
579 * iwqp->max_send_wr/max_recv_wr in the kernel.
581 iwqp->max_send_wr = init_attr->cap.max_send_wr;
582 iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
583 ukinfo->sq_size = init_attr->cap.max_send_wr;
584 ukinfo->rq_size = init_attr->cap.max_recv_wr;
585 irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift, &ukinfo->rq_shift);
587 ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
592 ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
597 iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
598 iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
599 ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
600 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
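		/*
		 * sq_depth/rq_depth are in quanta; the work request counts
		 * reported back to the user are whole WQEs (depth >> shift),
		 * less the ring's reserved slots (IRDMA_SQ_RSVD/IRDMA_RQ_RSVD).
		 */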
602 irdma_setup_virt_qp(iwdev, iwqp, info);
608 * irdma_setup_kmode_qp - setup initialization for kernel mode qp
609 * @iwdev: iwarp device
610 * @iwqp: qp ptr (user or kernel)
611 * @info: initialize info to return
612 * @init_attr: Initial QP create attributes
615 irdma_setup_kmode_qp(struct irdma_device *iwdev,
616 struct irdma_qp *iwqp,
617 struct irdma_qp_init_info *info,
618 struct ib_qp_init_attr *init_attr)
620 struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
623 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
625 status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
630 status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
635 iwqp->kqp.sq_wrid_mem =
636 kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
637 if (!iwqp->kqp.sq_wrid_mem)
640 iwqp->kqp.rq_wrid_mem =
641 kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
642 if (!iwqp->kqp.rq_wrid_mem) {
643 kfree(iwqp->kqp.sq_wrid_mem);
644 iwqp->kqp.sq_wrid_mem = NULL;
	iwqp->kqp.sig_trk_mem = kcalloc(ukinfo->sq_depth, sizeof(u32), GFP_KERNEL);
	if (!iwqp->kqp.sig_trk_mem) {
651 kfree(iwqp->kqp.sq_wrid_mem);
652 iwqp->kqp.sq_wrid_mem = NULL;
653 kfree(iwqp->kqp.rq_wrid_mem);
654 iwqp->kqp.rq_wrid_mem = NULL;
657 ukinfo->sq_sigwrtrk_array = (void *)iwqp->kqp.sig_trk_mem;
658 ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
659 ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
661 size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
662 size += (IRDMA_SHADOW_AREA_SIZE << 3);
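	/*
	 * A single contiguous DMA allocation backs the SQ, the RQ and the
	 * shadow area, in that order; the address arithmetic below carves the
	 * three regions out of it.
	 */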
665 mem->va = irdma_allocate_dma_mem(&iwdev->rf->hw, mem, mem->size,
668 kfree(iwqp->kqp.sq_wrid_mem);
669 iwqp->kqp.sq_wrid_mem = NULL;
670 kfree(iwqp->kqp.rq_wrid_mem);
671 iwqp->kqp.rq_wrid_mem = NULL;
675 ukinfo->sq = mem->va;
676 info->sq_pa = mem->pa;
677 ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
678 info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
679 ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
680 info->shadow_area_pa = info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
681 ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
682 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
683 ukinfo->qp_id = iwqp->ibqp.qp_num;
685 iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
686 iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
687 init_attr->cap.max_send_wr = iwqp->max_send_wr;
688 init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
694 irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
696 struct irdma_pci_f *rf = iwqp->iwdev->rf;
697 struct irdma_cqp_request *cqp_request;
698 struct cqp_cmds_info *cqp_info;
699 struct irdma_create_qp_info *qp_info;
702 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
706 cqp_info = &cqp_request->info;
707 qp_info = &cqp_request->info.in.u.qp_create.info;
708 memset(qp_info, 0, sizeof(*qp_info));
709 qp_info->mac_valid = true;
710 qp_info->cq_num_valid = true;
711 qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
713 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
714 cqp_info->post_sq = 1;
715 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
716 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
717 status = irdma_handle_cqp_op(rf, cqp_request);
718 irdma_put_cqp_request(&rf->cqp, cqp_request);
724 irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
725 struct irdma_qp_host_ctx_info *ctx_info)
727 struct irdma_device *iwdev = iwqp->iwdev;
728 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
729 struct irdma_roce_offload_info *roce_info;
730 struct irdma_udp_offload_info *udp_info;
732 udp_info = &iwqp->udp_info;
733 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
734 udp_info->cwnd = iwdev->roce_cwnd;
735 udp_info->rexmit_thresh = 2;
736 udp_info->rnr_nak_thresh = 2;
737 udp_info->src_port = 0xc000;
738 udp_info->dst_port = ROCE_V2_UDP_DPORT;
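	/*
	 * RoCEv2 defaults: well-known destination UDP port 4791
	 * (ROCE_V2_UDP_DPORT) and a source port at the base of the dynamic
	 * range (0xc000); the source port is recomputed from the flow label
	 * when the AV is set in irdma_modify_qp_roce().
	 */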
739 roce_info = &iwqp->roce_info;
740 ether_addr_copy(roce_info->mac_addr, if_getlladdr(iwdev->netdev));
742 roce_info->rd_en = true;
743 roce_info->wr_rdresp_en = true;
744 roce_info->bind_en = true;
745 roce_info->dcqcn_en = false;
746 roce_info->rtomin = iwdev->roce_rtomin;
748 roce_info->ack_credits = iwdev->roce_ackcreds;
749 roce_info->ird_size = dev->hw_attrs.max_hw_ird;
750 roce_info->ord_size = dev->hw_attrs.max_hw_ord;
752 if (!iwqp->user_mode) {
753 roce_info->priv_mode_en = true;
754 roce_info->fast_reg_en = true;
755 roce_info->udprivcq_en = true;
757 roce_info->roce_tver = 0;
759 ctx_info->roce_info = &iwqp->roce_info;
760 ctx_info->udp_info = &iwqp->udp_info;
761 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
765 irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
766 struct irdma_qp_host_ctx_info *ctx_info)
768 struct irdma_device *iwdev = iwqp->iwdev;
769 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
770 struct irdma_iwarp_offload_info *iwarp_info;
772 iwarp_info = &iwqp->iwarp_info;
773 ether_addr_copy(iwarp_info->mac_addr, if_getlladdr(iwdev->netdev));
774 iwarp_info->rd_en = true;
775 iwarp_info->wr_rdresp_en = true;
776 iwarp_info->bind_en = true;
777 iwarp_info->ecn_en = true;
778 iwarp_info->rtomin = 5;
780 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
781 iwarp_info->ib_rd_en = true;
782 if (!iwqp->user_mode) {
783 iwarp_info->priv_mode_en = true;
784 iwarp_info->fast_reg_en = true;
786 iwarp_info->ddp_ver = 1;
787 iwarp_info->rdmap_ver = 1;
789 ctx_info->iwarp_info = &iwqp->iwarp_info;
790 ctx_info->iwarp_info_valid = true;
791 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
792 ctx_info->iwarp_info_valid = false;
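	/*
	 * Clear the valid flag again so a later modify-QP only rewrites the
	 * iwarp context when it explicitly re-sets iwarp_info_valid (see
	 * irdma_modify_qp()).
	 */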
796 irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
797 struct irdma_device *iwdev)
799 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
800 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
802 if (init_attr->create_flags)
805 if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
806 init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
807 init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
810 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
811 if (init_attr->qp_type != IB_QPT_RC &&
812 init_attr->qp_type != IB_QPT_UD &&
813 init_attr->qp_type != IB_QPT_GSI)
816 if (init_attr->qp_type != IB_QPT_RC)
824 irdma_sched_qp_flush_work(struct irdma_qp *iwqp)
826 if (iwqp->sc_qp.qp_uk.destroy_pending)
828 irdma_qp_add_ref(&iwqp->ibqp);
829 if (mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
830 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)))
831 irdma_qp_rem_ref(&iwqp->ibqp);
835 irdma_flush_worker(struct work_struct *work)
837 struct delayed_work *dwork = to_delayed_work(work);
838 struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
840 irdma_generate_flush_completions(iwqp);
	/* Drop the reference taken via irdma_qp_add_ref() in irdma_sched_qp_flush_work() */
842 irdma_qp_rem_ref(&iwqp->ibqp);
846 irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
850 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
851 if (iwqp->roce_info.wr_rdresp_en) {
852 acc_flags |= IB_ACCESS_LOCAL_WRITE;
853 acc_flags |= IB_ACCESS_REMOTE_WRITE;
855 if (iwqp->roce_info.rd_en)
856 acc_flags |= IB_ACCESS_REMOTE_READ;
857 if (iwqp->roce_info.bind_en)
858 acc_flags |= IB_ACCESS_MW_BIND;
860 if (iwqp->iwarp_info.wr_rdresp_en) {
861 acc_flags |= IB_ACCESS_LOCAL_WRITE;
862 acc_flags |= IB_ACCESS_REMOTE_WRITE;
864 if (iwqp->iwarp_info.rd_en)
865 acc_flags |= IB_ACCESS_REMOTE_READ;
866 if (iwqp->iwarp_info.bind_en)
867 acc_flags |= IB_ACCESS_MW_BIND;
873 * irdma_query_qp - query qp attributes
875 * @attr: attributes pointer
876 * @attr_mask: Not used
877 * @init_attr: qp attributes to return
880 irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
881 int attr_mask, struct ib_qp_init_attr *init_attr)
883 struct irdma_qp *iwqp = to_iwqp(ibqp);
884 struct irdma_sc_qp *qp = &iwqp->sc_qp;
886 memset(attr, 0, sizeof(*attr));
887 memset(init_attr, 0, sizeof(*init_attr));
889 attr->qp_state = iwqp->ibqp_state;
890 attr->cur_qp_state = iwqp->ibqp_state;
891 attr->cap.max_send_wr = iwqp->max_send_wr;
892 attr->cap.max_recv_wr = iwqp->max_recv_wr;
893 attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
894 attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
895 attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
896 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
898 if (rdma_protocol_roce(ibqp->device, 1)) {
899 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
900 attr->qkey = iwqp->roce_info.qkey;
901 attr->rq_psn = iwqp->udp_info.epsn;
902 attr->sq_psn = iwqp->udp_info.psn_nxt;
903 attr->dest_qp_num = iwqp->roce_info.dest_qp;
904 attr->pkey_index = iwqp->roce_info.p_key;
905 attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
906 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
907 attr->max_rd_atomic = iwqp->roce_info.ord_size;
908 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
911 init_attr->event_handler = iwqp->ibqp.event_handler;
912 init_attr->qp_context = iwqp->ibqp.qp_context;
913 init_attr->send_cq = iwqp->ibqp.send_cq;
914 init_attr->recv_cq = iwqp->ibqp.recv_cq;
915 init_attr->cap = attr->cap;
921 * irdma_modify_qp_roce - modify qp request
922 * @ibqp: qp's pointer for modify
923 * @attr: access attributes
924 * @attr_mask: state mask
928 irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
929 int attr_mask, struct ib_udata *udata)
931 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
932 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
933 struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
934 struct irdma_qp *iwqp = to_iwqp(ibqp);
935 struct irdma_device *iwdev = iwqp->iwdev;
936 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
937 struct irdma_qp_host_ctx_info *ctx_info;
938 struct irdma_roce_offload_info *roce_info;
939 struct irdma_udp_offload_info *udp_info;
940 struct irdma_modify_qp_info info = {0};
941 struct irdma_modify_qp_resp uresp = {};
942 struct irdma_modify_qp_req ureq;
944 u8 issue_modify_qp = 0;
947 ctx_info = &iwqp->ctx_info;
948 roce_info = &iwqp->roce_info;
949 udp_info = &iwqp->udp_info;
952 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
953 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
957 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
960 if (attr_mask & IB_QP_DEST_QPN)
961 roce_info->dest_qp = attr->dest_qp_num;
963 if (attr_mask & IB_QP_PKEY_INDEX) {
964 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
970 if (attr_mask & IB_QP_QKEY)
971 roce_info->qkey = attr->qkey;
973 if (attr_mask & IB_QP_PATH_MTU)
974 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
976 if (attr_mask & IB_QP_SQ_PSN) {
977 udp_info->psn_nxt = attr->sq_psn;
978 udp_info->lsn = 0xffff;
979 udp_info->psn_una = attr->sq_psn;
980 udp_info->psn_max = attr->sq_psn;
983 if (attr_mask & IB_QP_RQ_PSN)
984 udp_info->epsn = attr->rq_psn;
986 if (attr_mask & IB_QP_RNR_RETRY)
987 udp_info->rnr_nak_thresh = attr->rnr_retry;
989 if (attr_mask & IB_QP_RETRY_CNT)
990 udp_info->rexmit_thresh = attr->retry_cnt;
992 ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
994 if (attr_mask & IB_QP_AV) {
995 struct irdma_av *av = &iwqp->roce_ah.av;
996 u16 vlan_id = VLAN_N_VID;
997 u32 local_ip[4] = {};
999 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
1000 if (attr->ah_attr.ah_flags & IB_AH_GRH) {
1001 udp_info->ttl = attr->ah_attr.grh.hop_limit;
1002 udp_info->flow_label = attr->ah_attr.grh.flow_label;
1003 udp_info->tos = attr->ah_attr.grh.traffic_class;
1005 udp_info->src_port = kc_rdma_get_udp_sport(udp_info->flow_label,
1007 roce_info->dest_qp);
1009 irdma_qp_rem_qos(&iwqp->sc_qp);
1010 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
1011 if (iwqp->sc_qp.vsi->dscp_mode)
1012 ctx_info->user_pri =
1013 iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
1015 ctx_info->user_pri = rt_tos2priority(udp_info->tos);
1017 ret = kc_irdma_set_roce_cm_info(iwqp, attr, &vlan_id);
1020 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
1022 iwqp->sc_qp.user_pri = ctx_info->user_pri;
1023 irdma_qp_add_qos(&iwqp->sc_qp);
1025 if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
1027 if (vlan_id < VLAN_N_VID) {
1028 udp_info->insert_vlan_tag = true;
1029 udp_info->vlan_tag = vlan_id |
1030 ctx_info->user_pri << VLAN_PRIO_SHIFT;
1032 udp_info->insert_vlan_tag = false;
1035 av->attrs = attr->ah_attr;
1036 rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
1037 if (av->net_type == RDMA_NETWORK_IPV6) {
1039 av->dgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
1041 av->sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
1043 irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
1044 irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
1046 udp_info->ipv4 = false;
1047 irdma_copy_ip_ntohl(local_ip, daddr);
1048 } else if (av->net_type == RDMA_NETWORK_IPV4) {
1049 __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
1050 __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
1052 local_ip[0] = ntohl(daddr);
1054 udp_info->ipv4 = true;
1055 udp_info->dest_ip_addr[0] = 0;
1056 udp_info->dest_ip_addr[1] = 0;
1057 udp_info->dest_ip_addr[2] = 0;
1058 udp_info->dest_ip_addr[3] = local_ip[0];
1060 udp_info->local_ipaddr[0] = 0;
1061 udp_info->local_ipaddr[1] = 0;
1062 udp_info->local_ipaddr[2] = 0;
1063 udp_info->local_ipaddr[3] = ntohl(saddr);
1068 irdma_add_arp(iwdev->rf, local_ip,
1069 ah_attr_to_dmac(attr->ah_attr));
1072 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1073 if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
1074 irdma_dev_err(&iwdev->ibdev,
1075 "rd_atomic = %d, above max_hw_ord=%d\n",
1076 attr->max_rd_atomic,
1077 dev->hw_attrs.max_hw_ord);
1080 if (attr->max_rd_atomic)
1081 roce_info->ord_size = attr->max_rd_atomic;
1082 info.ord_valid = true;
1085 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1086 if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
1087 irdma_dev_err(&iwdev->ibdev,
1088 "rd_atomic = %d, above max_hw_ird=%d\n",
			      attr->max_dest_rd_atomic,
1090 dev->hw_attrs.max_hw_ird);
1093 if (attr->max_dest_rd_atomic)
1094 roce_info->ird_size = attr->max_dest_rd_atomic;
1097 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1098 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1099 roce_info->wr_rdresp_en = true;
1100 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1101 roce_info->wr_rdresp_en = true;
1102 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1103 roce_info->rd_en = true;
1106 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1108 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
1109 "caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
1110 __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1111 iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
1113 spin_lock_irqsave(&iwqp->lock, flags);
1114 if (attr_mask & IB_QP_STATE) {
1115 if (!kc_ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
1116 iwqp->ibqp.qp_type, attr_mask,
1117 IB_LINK_LAYER_ETHERNET)) {
1118 irdma_dev_warn(&iwdev->ibdev,
1119 "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
1120 iwqp->ibqp.qp_num, iwqp->ibqp_state,
1125 info.curr_iwarp_state = iwqp->iwarp_state;
1127 switch (attr->qp_state) {
1129 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1134 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1135 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1136 issue_modify_qp = 1;
1140 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1144 info.arp_cache_idx_valid = true;
1145 info.cq_num_valid = true;
1146 info.next_iwarp_state = IRDMA_QP_STATE_RTR;
1147 issue_modify_qp = 1;
1150 if (iwqp->ibqp_state < IB_QPS_RTR ||
1151 iwqp->ibqp_state == IB_QPS_ERR) {
1156 info.arp_cache_idx_valid = true;
1157 info.cq_num_valid = true;
1158 info.ord_valid = true;
1159 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1160 issue_modify_qp = 1;
1161 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
1162 iwdev->rf->check_fc(&iwdev->vsi, &iwqp->sc_qp);
1163 udp_info->cwnd = iwdev->roce_cwnd;
1164 roce_info->ack_credits = iwdev->roce_ackcreds;
1165 if (iwdev->push_mode && udata &&
1166 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1167 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1168 spin_unlock_irqrestore(&iwqp->lock, flags);
1169 irdma_alloc_push_page(iwqp);
1170 spin_lock_irqsave(&iwqp->lock, flags);
1174 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
1177 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
1182 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1183 issue_modify_qp = 1;
1188 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
1189 if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
1190 irdma_cqp_qp_suspend_resume(&iwqp->sc_qp, IRDMA_OP_SUSPEND);
1191 spin_unlock_irqrestore(&iwqp->lock, flags);
1192 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1193 irdma_hw_modify_qp(iwdev, iwqp, &info, true);
1194 spin_lock_irqsave(&iwqp->lock, flags);
1197 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1198 spin_unlock_irqrestore(&iwqp->lock, flags);
1199 if (udata && udata->inlen) {
1200 if (ib_copy_from_udata(&ureq, udata,
1201 min(sizeof(ureq), udata->inlen)))
1204 irdma_flush_wqes(iwqp,
1205 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1206 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1212 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1213 issue_modify_qp = 1;
1220 iwqp->ibqp_state = attr->qp_state;
1223 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1224 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1225 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1226 spin_unlock_irqrestore(&iwqp->lock, flags);
1228 if (attr_mask & IB_QP_STATE) {
1229 if (issue_modify_qp) {
1230 ctx_info->rem_endpoint_idx = udp_info->arp_idx;
1231 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1233 spin_lock_irqsave(&iwqp->lock, flags);
1234 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1235 iwqp->iwarp_state = info.next_iwarp_state;
1236 iwqp->ibqp_state = attr->qp_state;
1238 if (iwqp->ibqp_state > IB_QPS_RTS &&
1239 !iwqp->flush_issued) {
1240 spin_unlock_irqrestore(&iwqp->lock, flags);
1241 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
1244 iwqp->flush_issued = 1;
1247 spin_unlock_irqrestore(&iwqp->lock, flags);
1250 iwqp->ibqp_state = attr->qp_state;
1252 if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1253 struct irdma_ucontext *ucontext;
1255 #if __FreeBSD_version >= 1400026
1256 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1258 ucontext = to_ucontext(ibqp->uobject->context);
1260 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1261 !iwqp->push_wqe_mmap_entry &&
1262 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1263 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1264 uresp.push_valid = 1;
1265 uresp.push_offset = iwqp->sc_qp.push_offset;
1267 uresp.rd_fence_rate = iwdev->rd_fence_rate;
1268 ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1271 irdma_remove_push_mmap_entries(iwqp);
1272 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
1273 "copy_to_udata failed\n");
1281 spin_unlock_irqrestore(&iwqp->lock, flags);
1287 * irdma_modify_qp - modify qp request
1288 * @ibqp: qp's pointer for modify
1289 * @attr: access attributes
1290 * @attr_mask: state mask
1294 irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1295 struct ib_udata *udata)
1297 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1298 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1299 struct irdma_qp *iwqp = to_iwqp(ibqp);
1300 struct irdma_device *iwdev = iwqp->iwdev;
1301 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1302 struct irdma_qp_host_ctx_info *ctx_info;
1303 struct irdma_tcp_offload_info *tcp_info;
1304 struct irdma_iwarp_offload_info *offload_info;
1305 struct irdma_modify_qp_info info = {0};
1306 struct irdma_modify_qp_resp uresp = {};
1307 struct irdma_modify_qp_req ureq = {};
1308 u8 issue_modify_qp = 0;
1311 unsigned long flags;
1314 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1315 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1319 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1322 ctx_info = &iwqp->ctx_info;
1323 offload_info = &iwqp->iwarp_info;
1324 tcp_info = &iwqp->tcp_info;
1325 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1326 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
1327 "caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
1328 __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1329 iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
1330 iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1332 spin_lock_irqsave(&iwqp->lock, flags);
1333 if (attr_mask & IB_QP_STATE) {
1334 info.curr_iwarp_state = iwqp->iwarp_state;
1335 switch (attr->qp_state) {
1338 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1343 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1344 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1345 issue_modify_qp = 1;
1347 if (iwdev->push_mode && udata &&
1348 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1349 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1350 spin_unlock_irqrestore(&iwqp->lock, flags);
1351 irdma_alloc_push_page(iwqp);
1352 spin_lock_irqsave(&iwqp->lock, flags);
1356 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1362 issue_modify_qp = 1;
1363 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1364 iwqp->hte_added = 1;
1365 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1366 info.tcp_ctx_valid = true;
1367 info.ord_valid = true;
1368 info.arp_cache_idx_valid = true;
1369 info.cq_num_valid = true;
1372 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1377 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1378 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1383 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1388 info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
1389 issue_modify_qp = 1;
1392 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1397 info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
1398 issue_modify_qp = 1;
1402 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1403 spin_unlock_irqrestore(&iwqp->lock, flags);
1404 if (udata && udata->inlen) {
1405 if (ib_copy_from_udata(&ureq, udata,
1406 min(sizeof(ureq), udata->inlen)))
1409 irdma_flush_wqes(iwqp,
1410 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1411 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1417 if (iwqp->sc_qp.term_flags) {
1418 spin_unlock_irqrestore(&iwqp->lock, flags);
1419 irdma_terminate_del_timer(&iwqp->sc_qp);
1420 spin_lock_irqsave(&iwqp->lock, flags);
1422 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1423 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1425 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1426 info.reset_tcp_conn = true;
1430 issue_modify_qp = 1;
1431 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1438 iwqp->ibqp_state = attr->qp_state;
1440 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1441 ctx_info->iwarp_info_valid = true;
1442 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1443 offload_info->wr_rdresp_en = true;
1444 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1445 offload_info->wr_rdresp_en = true;
1446 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1447 offload_info->rd_en = true;
1450 if (ctx_info->iwarp_info_valid) {
1451 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1452 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1453 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1455 spin_unlock_irqrestore(&iwqp->lock, flags);
1457 if (attr_mask & IB_QP_STATE) {
1458 if (issue_modify_qp) {
1459 ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1460 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1464 spin_lock_irqsave(&iwqp->lock, flags);
1465 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1466 iwqp->iwarp_state = info.next_iwarp_state;
1467 iwqp->ibqp_state = attr->qp_state;
1469 spin_unlock_irqrestore(&iwqp->lock, flags);
1472 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1474 if (iwqp->hw_tcp_state) {
1475 spin_lock_irqsave(&iwqp->lock, flags);
1476 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1477 iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1478 spin_unlock_irqrestore(&iwqp->lock, flags);
1480 irdma_cm_disconn(iwqp);
1482 int close_timer_started;
1484 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1486 if (iwqp->cm_node) {
1487 atomic_inc(&iwqp->cm_node->refcnt);
1488 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1489 close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1490 if (iwqp->cm_id && close_timer_started == 1)
1491 irdma_schedule_cm_timer(iwqp->cm_node,
1492 (struct irdma_puda_buf *)iwqp,
1493 IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1495 irdma_rem_ref_cm_node(iwqp->cm_node);
1497 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1501 if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
1502 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1503 struct irdma_ucontext *ucontext;
1505 #if __FreeBSD_version >= 1400026
1506 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1508 ucontext = to_ucontext(ibqp->uobject->context);
1510 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1511 !iwqp->push_wqe_mmap_entry &&
1512 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1513 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1514 uresp.push_valid = 1;
1515 uresp.push_offset = iwqp->sc_qp.push_offset;
1517 uresp.rd_fence_rate = iwdev->rd_fence_rate;
1519 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1522 irdma_remove_push_mmap_entries(iwqp);
1523 irdma_debug(&iwdev->rf->sc_dev,
1524 IRDMA_DEBUG_VERBS, "copy_to_udata failed\n");
1531 spin_unlock_irqrestore(&iwqp->lock, flags);
1537 * irdma_cq_free_rsrc - free up resources for cq
1538 * @rf: RDMA PCI function
1542 irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1544 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1546 if (!iwcq->user_mode) {
1547 irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem);
1548 irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem_shadow);
1551 irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1555 * irdma_free_cqbuf - worker to free a cq buffer
1556 * @work: provides access to the cq buffer to free
1559 irdma_free_cqbuf(struct work_struct *work)
1561 struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1563 irdma_free_dma_mem(cq_buf->hw, &cq_buf->kmem_buf);
1568 * irdma_process_resize_list - remove resized cq buffers from the resize_list
1569 * @iwcq: cq which owns the resize_list
1570 * @iwdev: irdma device
1571 * @lcqe_buf: the buffer where the last cqe is received
1574 irdma_process_resize_list(struct irdma_cq *iwcq,
1575 struct irdma_device *iwdev,
1576 struct irdma_cq_buf *lcqe_buf)
1578 struct list_head *tmp_node, *list_node;
1579 struct irdma_cq_buf *cq_buf;
1582 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1583 cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1584 if (cq_buf == lcqe_buf)
1587 list_del(&cq_buf->list);
1588 queue_work(iwdev->cleanup_wq, &cq_buf->work);
1596 * irdma_resize_cq - resize cq
1597 * @ibcq: cq to be resized
1598 * @entries: desired cq size
1602 irdma_resize_cq(struct ib_cq *ibcq, int entries,
1603 struct ib_udata *udata)
1605 #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
1606 struct irdma_cq *iwcq = to_iwcq(ibcq);
1607 struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
1608 struct irdma_cqp_request *cqp_request;
1609 struct cqp_cmds_info *cqp_info;
1610 struct irdma_modify_cq_info *m_info;
1611 struct irdma_modify_cq_info info = {0};
1612 struct irdma_dma_mem kmem_buf;
1613 struct irdma_cq_mr *cqmr_buf;
1614 struct irdma_pbl *iwpbl_buf;
1615 struct irdma_device *iwdev;
1616 struct irdma_pci_f *rf;
1617 struct irdma_cq_buf *cq_buf = NULL;
1618 unsigned long flags;
1621 iwdev = to_iwdev(ibcq->device);
1624 if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1625 IRDMA_FEATURE_CQ_RESIZE))
1628 if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
1631 if (entries > rf->max_cqe)
1634 if (!iwcq->user_mode) {
1636 if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1640 info.cq_size = max(entries, 4);
1642 if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
1646 struct irdma_resize_cq_req req = {};
1647 struct irdma_ucontext *ucontext =
1648 #if __FreeBSD_version >= 1400026
1649 rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1651 to_ucontext(ibcq->uobject->context);
1654 /* CQ resize not supported with legacy GEN_1 libi40iw */
1655 if (ucontext->legacy_mode)
1658 if (ib_copy_from_udata(&req, udata,
1659 min(sizeof(req), udata->inlen)))
1662 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1663 iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
1664 &ucontext->cq_reg_mem_list);
1665 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1670 cqmr_buf = &iwpbl_buf->cq_mr;
1671 if (iwpbl_buf->pbl_allocated) {
1672 info.virtual_map = true;
1673 info.pbl_chunk_size = 1;
1674 info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
1676 info.cq_pa = cqmr_buf->cq_pbl.addr;
1679 /* Kmode CQ resize */
1682 rsize = info.cq_size * sizeof(struct irdma_cqe);
1683 kmem_buf.size = round_up(rsize, 256);
1684 kmem_buf.va = irdma_allocate_dma_mem(dev->hw, &kmem_buf,
1685 kmem_buf.size, 256);
1689 info.cq_base = kmem_buf.va;
1690 info.cq_pa = kmem_buf.pa;
1691 cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
1698 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1704 info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
1705 info.cq_resize = true;
1707 cqp_info = &cqp_request->info;
1708 m_info = &cqp_info->in.u.cq_modify.info;
1709 memcpy(m_info, &info, sizeof(*m_info));
1711 cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
1712 cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
1713 cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
1714 cqp_info->post_sq = 1;
1715 ret = irdma_handle_cqp_op(rf, cqp_request);
1716 irdma_put_cqp_request(&rf->cqp, cqp_request);
1720 spin_lock_irqsave(&iwcq->lock, flags);
1722 cq_buf->kmem_buf = iwcq->kmem;
1723 cq_buf->hw = dev->hw;
1724 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
1725 INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
1726 list_add_tail(&cq_buf->list, &iwcq->resize_list);
1727 iwcq->kmem = kmem_buf;
1730 irdma_sc_cq_resize(&iwcq->sc_cq, &info);
1731 ibcq->cqe = info.cq_size - 1;
1732 spin_unlock_irqrestore(&iwcq->lock, flags);
1737 irdma_free_dma_mem(dev->hw, &kmem_buf);
1744 * irdma_get_mr_access - get hw MR access permissions from IB access flags
1745 * @access: IB access flags
static inline u16
irdma_get_mr_access(int access)
{
1750 hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
1751 IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
1752 hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
1753 IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
1754 hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
1755 IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
1756 hw_access |= (access & IB_ACCESS_MW_BIND) ?
1757 IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
1758 hw_access |= (access & IB_ZERO_BASED) ?
1759 IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
1760 hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
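	/* local read access is always granted, independent of the IB flags */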
1766 * irdma_free_stag - free stag resource
1767 * @iwdev: irdma device
1768 * @stag: stag to free
1771 irdma_free_stag(struct irdma_device *iwdev, u32 stag)
1775 stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
1776 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
1780 * irdma_create_stag - create random stag
1781 * @iwdev: irdma device
1784 irdma_create_stag(struct irdma_device *iwdev)
1788 u32 next_stag_index;
1794 get_random_bytes(&random, sizeof(random));
1795 consumer_key = (u8)random;
1797 driver_key = random & ~iwdev->rf->mr_stagmask;
1798 next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
1799 next_stag_index %= iwdev->rf->max_mr;
1801 ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
1802 iwdev->rf->max_mr, &stag_index,
1806 stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
1808 stag += (u32)consumer_key;
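	/*
	 * The resulting stag carries the allocated resource index in the
	 * STag-index field and the random 8-bit consumer key in its low byte.
	 */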
1814 * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
1815 * @arr: lvl1 pbl array
1816 * @npages: page count
1817 * @pg_size: page size
1821 irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
1825 for (pg_idx = 0; pg_idx < npages; pg_idx++) {
1826 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
1834 * irdma_check_mr_contiguous - check if MR is physically contiguous
1835 * @palloc: pbl allocation struct
1836 * @pg_size: page size
1839 irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
1842 struct irdma_pble_level2 *lvl2 = &palloc->level2;
1843 struct irdma_pble_info *leaf = lvl2->leaf;
1845 u64 *start_addr = NULL;
1849 if (palloc->level == PBLE_LEVEL_1) {
1850 arr = palloc->level1.addr;
1851 ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
1856 start_addr = leaf->addr;
1858 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
1860 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
1862 ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
 * irdma_setup_pbles - copy user page addresses to pbles
1872 * @rf: RDMA PCI function
1873 * @iwmr: mr pointer for this memory registration
1874 * @lvl: requested pble levels
1877 irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
1880 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1881 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1882 struct irdma_pble_info *pinfo;
1885 enum irdma_pble_level level = PBLE_LEVEL_1;
1888 status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
1893 iwpbl->pbl_allocated = true;
1894 level = palloc->level;
1895 pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
1896 palloc->level2.leaf;
1899 pbl = iwmr->pgaddrmem;
1902 irdma_copy_user_pgaddrs(iwmr, pbl, level);
1905 iwmr->pgaddrmem[0] = *pbl;
1911 * irdma_handle_q_mem - handle memory for qp and cq
1912 * @iwdev: irdma device
1913 * @req: information for q memory management
1914 * @iwpbl: pble struct
1915 * @lvl: pble level mask
1918 irdma_handle_q_mem(struct irdma_device *iwdev,
1919 struct irdma_mem_reg_req *req,
1920 struct irdma_pbl *iwpbl, u8 lvl)
1922 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1923 struct irdma_mr *iwmr = iwpbl->iwmr;
1924 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
1925 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
1926 struct irdma_hmc_pble *hmc_p;
1927 u64 *arr = iwmr->pgaddrmem;
1932 pg_size = iwmr->page_size;
1933 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
1938 arr = palloc->level1.addr;
1940 switch (iwmr->type) {
1941 case IRDMA_MEMREG_TYPE_QP:
1942 total = req->sq_pages + req->rq_pages;
1943 hmc_p = &qpmr->sq_pbl;
1944 qpmr->shadow = (dma_addr_t) arr[total];
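		/* the shadow area page immediately follows the SQ and RQ pages */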
1946 ret = irdma_check_mem_contiguous(arr, req->sq_pages,
1949 ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
1955 hmc_p->idx = palloc->level1.idx;
1956 hmc_p = &qpmr->rq_pbl;
1957 hmc_p->idx = palloc->level1.idx + req->sq_pages;
1959 hmc_p->addr = arr[0];
1960 hmc_p = &qpmr->rq_pbl;
1961 hmc_p->addr = arr[req->sq_pages];
1964 case IRDMA_MEMREG_TYPE_CQ:
1965 hmc_p = &cqmr->cq_pbl;
1968 cqmr->shadow = (dma_addr_t) arr[req->cq_pages];
1971 ret = irdma_check_mem_contiguous(arr, req->cq_pages,
1975 hmc_p->idx = palloc->level1.idx;
1977 hmc_p->addr = arr[0];
1980 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "MR type error\n");
1985 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
1986 iwpbl->pbl_allocated = false;
1993 * irdma_hw_alloc_mw - create the hw memory window
1994 * @iwdev: irdma device
1995 * @iwmr: pointer to memory window info
1998 irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
2000 struct irdma_mw_alloc_info *info;
2001 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2002 struct irdma_cqp_request *cqp_request;
2003 struct cqp_cmds_info *cqp_info;
2006 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2010 cqp_info = &cqp_request->info;
2011 info = &cqp_info->in.u.mw_alloc.info;
2012 memset(info, 0, sizeof(*info));
2013 if (iwmr->ibmw.type == IB_MW_TYPE_1)
2014 info->mw_wide = true;
2016 info->page_size = PAGE_SIZE;
2017 info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2018 info->pd_id = iwpd->sc_pd.pd_id;
2019 info->remote_access = true;
2020 cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
2021 cqp_info->post_sq = 1;
2022 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
2023 cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
2024 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2025 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2031 * irdma_dealloc_mw - Dealloc memory window
2032 * @ibmw: memory window structure.
2035 irdma_dealloc_mw(struct ib_mw *ibmw)
2037 struct ib_pd *ibpd = ibmw->pd;
2038 struct irdma_pd *iwpd = to_iwpd(ibpd);
2039 struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
2040 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2041 struct irdma_cqp_request *cqp_request;
2042 struct cqp_cmds_info *cqp_info;
2043 struct irdma_dealloc_stag_info *info;
2045 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2049 cqp_info = &cqp_request->info;
2050 info = &cqp_info->in.u.dealloc_stag.info;
2051 memset(info, 0, sizeof(*info));
2052 info->pd_id = iwpd->sc_pd.pd_id;
2053 info->stag_idx = RS_64_1(ibmw->rkey, IRDMA_CQPSQ_STAG_IDX_S);
2055 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2056 cqp_info->post_sq = 1;
2057 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2058 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2059 irdma_handle_cqp_op(iwdev->rf, cqp_request);
2060 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2061 irdma_free_stag(iwdev, iwmr->stag);
2068 * irdma_hw_alloc_stag - cqp command to allocate stag
2069 * @iwdev: irdma device
2070 * @iwmr: irdma mr pointer
2073 irdma_hw_alloc_stag(struct irdma_device *iwdev,
2074 struct irdma_mr *iwmr)
2076 struct irdma_allocate_stag_info *info;
2077 struct ib_pd *pd = iwmr->ibmr.pd;
2078 struct irdma_pd *iwpd = to_iwpd(pd);
2079 struct irdma_cqp_request *cqp_request;
2080 struct cqp_cmds_info *cqp_info;
2083 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2087 cqp_info = &cqp_request->info;
2088 info = &cqp_info->in.u.alloc_stag.info;
2089 memset(info, 0, sizeof(*info));
2090 info->page_size = PAGE_SIZE;
2091 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2092 info->pd_id = iwpd->sc_pd.pd_id;
2093 info->total_len = iwmr->len;
2094 info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
2095 info->remote_access = true;
2096 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
2097 cqp_info->post_sq = 1;
2098 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
2099 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
2100 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2101 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2109 * irdma_set_page - populate pbl list for fmr
2110 * @ibmr: ib mem to access iwarp mr pointer
 * @addr: page dma address for the pbl list
2114 irdma_set_page(struct ib_mr *ibmr, u64 addr)
2116 struct irdma_mr *iwmr = to_iwmr(ibmr);
2117 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2118 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2121 if (unlikely(iwmr->npages == iwmr->page_cnt))
2124 if (palloc->level == PBLE_LEVEL_2) {
2125 struct irdma_pble_info *palloc_info =
2126 palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
2128 palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
2130 pbl = palloc->level1.addr;
2131 pbl[iwmr->npages] = addr;
 * irdma_map_mr_sg - map sg list for fmr
2140 * @ibmr: ib mem to access iwarp mr pointer
2141 * @sg: scatter gather list
2142 * @sg_nents: number of sg pages
2143 * @sg_offset: scatter gather list for fmr
2146 irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2147 int sg_nents, unsigned int *sg_offset)
2149 struct irdma_mr *iwmr = to_iwmr(ibmr);
2153 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
2157 * irdma_hwreg_mr - send cqp command for memory registration
2158 * @iwdev: irdma device
2159 * @iwmr: irdma mr pointer
2160 * @access: access for MR
2163 irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
2166 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2167 struct irdma_reg_ns_stag_info *stag_info;
2168 struct ib_pd *pd = iwmr->ibmr.pd;
2169 struct irdma_pd *iwpd = to_iwpd(pd);
2170 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2171 struct irdma_cqp_request *cqp_request;
2172 struct cqp_cmds_info *cqp_info;
2175 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2179 cqp_info = &cqp_request->info;
2180 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
2181 memset(stag_info, 0, sizeof(*stag_info));
2182 stag_info->va = iwpbl->user_base;
2183 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2184 stag_info->stag_key = (u8)iwmr->stag;
2185 stag_info->total_len = iwmr->len;
2186 stag_info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
2187 stag_info->access_rights = irdma_get_mr_access(access);
2188 stag_info->pd_id = iwpd->sc_pd.pd_id;
2189 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
2190 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
2192 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2193 stag_info->page_size = iwmr->page_size;
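/*
 * PBL-backed MRs program the PBL index and chunk size: a level-1 PBL uses
 * chunk_size 1 with its level-1 index, a level-2 PBL uses chunk_size 3 with
 * the level-2 root index. Physically contiguous MRs skip the PBL and program
 * the first page address directly.
 */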
2195 if (iwpbl->pbl_allocated) {
2196 if (palloc->level == PBLE_LEVEL_1) {
2197 stag_info->first_pm_pbl_index = palloc->level1.idx;
2198 stag_info->chunk_size = 1;
2200 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
2201 stag_info->chunk_size = 3;
2204 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
2207 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
2208 cqp_info->post_sq = 1;
2209 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
2210 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
2211 ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2212 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2221 * irdma_reg_user_mr - Register a user memory region
2223 * @start: virtual start address
2224 * @len: length of mr
2225 * @virt: virtual address
2226 * @access: access of mr
2229 static struct ib_mr *
2230 irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
2231 u64 virt, int access,
2232 struct ib_udata *udata)
2234 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
2235 struct irdma_device *iwdev = to_iwdev(pd->device);
2236 struct irdma_ucontext *ucontext;
2237 struct irdma_pble_alloc *palloc;
2238 struct irdma_pbl *iwpbl;
2239 struct irdma_mr *iwmr;
2240 struct ib_umem *region;
2241 struct irdma_mem_reg_req req = {};
2242 u32 total, stag = 0;
2243 u8 shadow_pgcnt = 1;
2244 unsigned long flags;
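/* QP and CQ registrations reserve one extra page (shadow_pgcnt) for the queue shadow area. */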
2249 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
2250 return ERR_PTR(-EINVAL);
2252 if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
2253 return ERR_PTR(-EINVAL);
2255 region = ib_umem_get(pd->uobject->context, start, len, access, 0);
2257 if (IS_ERR(region)) {
2258 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
2259 "Failed to create ib_umem region\n");
2260 return (struct ib_mr *)region;
2263 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
2264 ib_umem_release(region);
2265 return ERR_PTR(-EFAULT);
2268 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2270 ib_umem_release(region);
2271 return ERR_PTR(-ENOMEM);
2274 iwpbl = &iwmr->iwpbl;
2276 iwmr->region = region;
2278 iwmr->ibmr.device = pd->device;
2279 iwmr->ibmr.iova = virt;
2280 iwmr->page_size = IRDMA_HW_PAGE_SIZE;
2281 iwmr->page_msk = ~(IRDMA_HW_PAGE_SIZE - 1);
2283 iwmr->len = region->length;
2284 iwpbl->user_base = virt;
2285 palloc = &iwpbl->pble_alloc;
2286 iwmr->type = req.reg_type;
2287 iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size, virt);
2289 switch (req.reg_type) {
2290 case IRDMA_MEMREG_TYPE_QP:
2291 total = req.sq_pages + req.rq_pages + shadow_pgcnt;
2292 if (total > iwmr->page_cnt) {
2296 total = req.sq_pages + req.rq_pages;
2297 lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
2298 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
2302 #if __FreeBSD_version >= 1400026
2303 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
2305 ucontext = to_ucontext(pd->uobject->context);
2307 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2308 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
2309 iwpbl->on_list = true;
2310 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2312 case IRDMA_MEMREG_TYPE_CQ:
2313 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
2315 total = req.cq_pages + shadow_pgcnt;
2316 if (total > iwmr->page_cnt) {
2321 lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
2322 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
2326 #if __FreeBSD_version >= 1400026
2327 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
2329 ucontext = to_ucontext(pd->uobject->context);
2331 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2332 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
2333 iwpbl->on_list = true;
2334 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2336 case IRDMA_MEMREG_TYPE_MEM:
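/*
 * General MRs use a one- or two-level PBL unless the region is a single
 * page; if the pages turn out to be physically contiguous the PBL is
 * released again and the MR is registered directly.
 */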
2337 lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
2338 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
2343 ret = irdma_check_mr_contiguous(palloc,
2346 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2347 iwpbl->pbl_allocated = false;
2351 stag = irdma_create_stag(iwdev);
2358 iwmr->ibmr.rkey = stag;
2359 iwmr->ibmr.lkey = stag;
2360 iwmr->access = access;
2361 err = irdma_hwreg_mr(iwdev, iwmr, access);
2363 irdma_free_stag(iwdev, stag);
2372 iwmr->type = req.reg_type;
2377 if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
2378 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2379 ib_umem_release(region);
2382 return ERR_PTR(err);
2386 irdma_hwdereg_mr(struct ib_mr *ib_mr)
2388 struct irdma_device *iwdev = to_iwdev(ib_mr->device);
2389 struct irdma_mr *iwmr = to_iwmr(ib_mr);
2390 struct irdma_pd *iwpd = to_iwpd(ib_mr->pd);
2391 struct irdma_dealloc_stag_info *info;
2392 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2393 struct irdma_cqp_request *cqp_request;
2394 struct cqp_cmds_info *cqp_info;
2398	 * Skip HW MR de-register when it is already de-registered during an MR re-register and the re-registration
2401 if (!iwmr->is_hwreg)
2404 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2408 cqp_info = &cqp_request->info;
2409 info = &cqp_info->in.u.dealloc_stag.info;
2410 memset(info, 0, sizeof(*info));
2411 info->pd_id = iwpd->sc_pd.pd_id;
2412 info->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S);
2414 if (iwpbl->pbl_allocated)
2415 info->dealloc_pbl = true;
2417 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2418 cqp_info->post_sq = 1;
2419 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2420 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2421 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2422 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2431  * irdma_rereg_mr_trans - Re-register a user MR for a translation change
2432  * @iwmr: ptr of iwmr, @start: virtual start address, @len: length of mr, @virt: virtual address
2434  * Re-register a user memory region when a translation change is requested. A new region is
2435  * registered while reusing the stag from the original registration.
2438 irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
2439 u64 virt, struct ib_udata *udata)
2441 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2442 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2443 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2444 struct ib_pd *pd = iwmr->ibmr.pd;
2445 struct ib_umem *region;
2449 region = ib_umem_get(pd->uobject->context, start, len, iwmr->access, 0);
2451 if (IS_ERR(region)) {
2452 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
2453 "Failed to create ib_umem region\n");
2454 return (struct ib_mr *)region;
2457 iwmr->region = region;
2458 iwmr->ibmr.iova = virt;
2460 iwmr->page_size = PAGE_SIZE;
2462 iwmr->len = region->length;
2463 iwpbl->user_base = virt;
2464 iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size,
2467 lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
2469 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
2474 err = irdma_check_mr_contiguous(palloc,
2477 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2478 iwpbl->pbl_allocated = false;
2482 err = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
2489 if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) {
2490 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2491 iwpbl->pbl_allocated = false;
2493 ib_umem_release(region);
2494 iwmr->region = NULL;
2496 return ERR_PTR(err);
2500 * irdma_reg_phys_mr - register kernel physical memory
2502 * @addr: physical address of memory to register
2503 * @size: size of memory to register
2504 * @access: Access rights
2505 * @iova_start: start of virtual address for physical buffers
2508 irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
2511 struct irdma_device *iwdev = to_iwdev(pd->device);
2512 struct irdma_pbl *iwpbl;
2513 struct irdma_mr *iwmr;
2517 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2519 return ERR_PTR(-ENOMEM);
2522 iwmr->ibmr.device = pd->device;
2523 iwpbl = &iwmr->iwpbl;
2525 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2526 iwpbl->user_base = *iova_start;
2527 stag = irdma_create_stag(iwdev);
2534 iwmr->ibmr.iova = *iova_start;
2535 iwmr->ibmr.rkey = stag;
2536 iwmr->ibmr.lkey = stag;
2538 iwmr->pgaddrmem[0] = addr;
2540 iwmr->page_size = SZ_4K;
2541 ret = irdma_hwreg_mr(iwdev, iwmr, access);
2543 irdma_free_stag(iwdev, stag);
2552 return ERR_PTR(ret);
2556 * irdma_get_dma_mr - register physical mem
2558 * @acc: access for memory
2560 static struct ib_mr *
2561 irdma_get_dma_mr(struct ib_pd *pd, int acc)
2565 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
2569 * irdma_del_memlist - Deleting pbl list entries for CQ/QP
2570 * @iwmr: iwmr for IB's user page addresses
2571 * @ucontext: ptr to user context
2574 irdma_del_memlist(struct irdma_mr *iwmr,
2575 struct irdma_ucontext *ucontext)
2577 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2578 unsigned long flags;
2580 switch (iwmr->type) {
2581 case IRDMA_MEMREG_TYPE_CQ:
2582 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2583 if (iwpbl->on_list) {
2584 iwpbl->on_list = false;
2585 list_del(&iwpbl->list);
2587 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2589 case IRDMA_MEMREG_TYPE_QP:
2590 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2591 if (iwpbl->on_list) {
2592 iwpbl->on_list = false;
2593 list_del(&iwpbl->list);
2595 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2603 * irdma_copy_sg_list - copy sg list for qp
2604  * @sg_list: destination sg list to copy into
2605  * @sgl: source ib_sge list to copy from
2606 * @num_sges: count of sg entries
2609 irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
2614 for (i = 0; i < num_sges; i++) {
2615 sg_list[i].tag_off = sgl[i].addr;
2616 sg_list[i].len = sgl[i].length;
2617 sg_list[i].stag = sgl[i].lkey;
2622  * irdma_post_send - post send wr for kernel application
2623 * @ibqp: qp ptr for wr
2624 * @ib_wr: work request ptr
2625 * @bad_wr: return of bad wr if err
2628 irdma_post_send(struct ib_qp *ibqp,
2629 const struct ib_send_wr *ib_wr,
2630 const struct ib_send_wr **bad_wr)
2632 struct irdma_qp *iwqp;
2633 struct irdma_qp_uk *ukqp;
2634 struct irdma_sc_dev *dev;
2635 struct irdma_post_sq_info info;
2637 unsigned long flags;
2639 struct irdma_ah *ah;
2641 iwqp = to_iwqp(ibqp);
2642 ukqp = &iwqp->sc_qp.qp_uk;
2643 dev = &iwqp->iwdev->rf->sc_dev;
2645 spin_lock_irqsave(&iwqp->lock, flags);
2647 memset(&info, 0, sizeof(info));
2649 info.wr_id = (ib_wr->wr_id);
2650 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
2651 info.signaled = true;
2652 if (ib_wr->send_flags & IB_SEND_FENCE)
2653 info.read_fence = true;
2654 switch (ib_wr->opcode) {
2655 case IB_WR_SEND_WITH_IMM:
2656 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
2657 info.imm_data_valid = true;
2658 info.imm_data = ntohl(ib_wr->ex.imm_data);
2665 case IB_WR_SEND_WITH_INV:
2666 if (ib_wr->opcode == IB_WR_SEND ||
2667 ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
2668 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2669 info.op_type = IRDMA_OP_TYPE_SEND_SOL;
2671 info.op_type = IRDMA_OP_TYPE_SEND;
2673 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2674 info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
2676 info.op_type = IRDMA_OP_TYPE_SEND_INV;
2677 info.stag_to_inv = ib_wr->ex.invalidate_rkey;
2680 info.op.send.num_sges = ib_wr->num_sge;
2681 info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
2682 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
2683 iwqp->ibqp.qp_type == IB_QPT_GSI) {
2684 ah = to_iwah(ud_wr(ib_wr)->ah);
2685 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
2686 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
2687 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
2690 if (ib_wr->send_flags & IB_SEND_INLINE)
2691 err = irdma_uk_inline_send(ukqp, &info, false);
2693 err = irdma_uk_send(ukqp, &info, false);
2695 case IB_WR_RDMA_WRITE_WITH_IMM:
2696 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
2697 info.imm_data_valid = true;
2698 info.imm_data = ntohl(ib_wr->ex.imm_data);
2704 case IB_WR_RDMA_WRITE:
2705 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2706 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
2708 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
2710 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
2711 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
2712 info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2713 info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2714 if (ib_wr->send_flags & IB_SEND_INLINE)
2715 err = irdma_uk_inline_rdma_write(ukqp, &info, false);
2717 err = irdma_uk_rdma_write(ukqp, &info, false);
2719 case IB_WR_RDMA_READ_WITH_INV:
2722 case IB_WR_RDMA_READ:
2723 if (ib_wr->num_sge >
2724 dev->hw_attrs.uk_attrs.max_hw_read_sges) {
2728 info.op_type = IRDMA_OP_TYPE_RDMA_READ;
2729 info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2730 info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2731 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
2732 info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
2733 err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
2735 case IB_WR_LOCAL_INV:
2736 info.op_type = IRDMA_OP_TYPE_INV_STAG;
2737 info.local_fence = info.read_fence;
2738 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
2739 err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
2742 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
2743 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
2744 struct irdma_fast_reg_stag_info stag_info = {0};
2746 stag_info.signaled = info.signaled;
2747 stag_info.read_fence = info.read_fence;
2748 stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
2749 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
2750 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
2751 stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
2752 stag_info.wr_id = ib_wr->wr_id;
2753 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2754 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
2755 stag_info.total_len = iwmr->ibmr.length;
2756 if (palloc->level == PBLE_LEVEL_2) {
2757 stag_info.chunk_size = 3;
2758 stag_info.first_pm_pbl_index = palloc->level2.root.idx;
2760 stag_info.chunk_size = 1;
2761 stag_info.first_pm_pbl_index = palloc->level1.idx;
2763 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
2764 err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
2770 irdma_debug(&iwqp->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
2771 "upost_send bad opcode = 0x%x\n",
2778 ib_wr = ib_wr->next;
2781 if (!iwqp->flush_issued) {
2782 if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
2783 irdma_uk_qp_post_wr(ukqp);
2784 spin_unlock_irqrestore(&iwqp->lock, flags);
2786 spin_unlock_irqrestore(&iwqp->lock, flags);
2787 irdma_sched_qp_flush_work(iwqp);
2797 * irdma_post_recv - post receive wr for kernel application
2798 * @ibqp: ib qp pointer
2799 * @ib_wr: work request for receive
2800 * @bad_wr: bad wr caused an error
2803 irdma_post_recv(struct ib_qp *ibqp,
2804 const struct ib_recv_wr *ib_wr,
2805 const struct ib_recv_wr **bad_wr)
2807 struct irdma_qp *iwqp = to_iwqp(ibqp);
2808 struct irdma_qp_uk *ukqp = &iwqp->sc_qp.qp_uk;
2809 struct irdma_post_rq_info post_recv = {0};
2810 struct irdma_sge *sg_list = iwqp->sg_list;
2811 unsigned long flags;
2814 spin_lock_irqsave(&iwqp->lock, flags);
2817 if (ib_wr->num_sge > ukqp->max_rq_frag_cnt) {
2821 post_recv.num_sges = ib_wr->num_sge;
2822 post_recv.wr_id = ib_wr->wr_id;
2823 irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
2824 post_recv.sg_list = sg_list;
2825 err = irdma_uk_post_receive(ukqp, &post_recv);
2827 irdma_debug(&iwqp->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
2828 "post_recv err %d\n",
2833 ib_wr = ib_wr->next;
2837 spin_unlock_irqrestore(&iwqp->lock, flags);
2838 if (iwqp->flush_issued)
2839 irdma_sched_qp_flush_work(iwqp);
2848  * irdma_flush_err_to_ib_wc_status - map a flush error code to the IB wc status
2849 * @opcode: iwarp flush code
2851 static enum ib_wc_status
2852 irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
2855 case FLUSH_PROT_ERR:
2856 return IB_WC_LOC_PROT_ERR;
2857 case FLUSH_REM_ACCESS_ERR:
2858 return IB_WC_REM_ACCESS_ERR;
2859 case FLUSH_LOC_QP_OP_ERR:
2860 return IB_WC_LOC_QP_OP_ERR;
2861 case FLUSH_REM_OP_ERR:
2862 return IB_WC_REM_OP_ERR;
2863 case FLUSH_LOC_LEN_ERR:
2864 return IB_WC_LOC_LEN_ERR;
2865 case FLUSH_GENERAL_ERR:
2866 return IB_WC_WR_FLUSH_ERR;
2867 case FLUSH_MW_BIND_ERR:
2868 return IB_WC_MW_BIND_ERR;
2869 case FLUSH_REM_INV_REQ_ERR:
2870 return IB_WC_REM_INV_REQ_ERR;
2871 case FLUSH_RETRY_EXC_ERR:
2872 return IB_WC_RETRY_EXC_ERR;
2873 case FLUSH_FATAL_ERR:
2875 return IB_WC_FATAL_ERR;
2880 set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
2881 struct ib_wc *entry)
2883 struct irdma_sc_qp *qp;
2885 switch (cq_poll_info->op_type) {
2886 case IRDMA_OP_TYPE_RDMA_WRITE:
2887 case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
2888 entry->opcode = IB_WC_RDMA_WRITE;
2890 case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
2891 case IRDMA_OP_TYPE_RDMA_READ:
2892 entry->opcode = IB_WC_RDMA_READ;
2894 case IRDMA_OP_TYPE_SEND_SOL:
2895 case IRDMA_OP_TYPE_SEND_SOL_INV:
2896 case IRDMA_OP_TYPE_SEND_INV:
2897 case IRDMA_OP_TYPE_SEND:
2898 entry->opcode = IB_WC_SEND;
2900 case IRDMA_OP_TYPE_FAST_REG_NSMR:
2901 entry->opcode = IB_WC_REG_MR;
2903 case IRDMA_OP_TYPE_INV_STAG:
2904 entry->opcode = IB_WC_LOCAL_INV;
2907 qp = cq_poll_info->qp_handle;
2908 irdma_dev_err(to_ibdev(qp->dev), "Invalid opcode = %d in CQE\n",
2909 cq_poll_info->op_type);
2910 entry->status = IB_WC_GENERAL_ERR;
2915 set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
2916 struct ib_wc *entry, bool send_imm_support)
2919	 * iWARP does not support sendImm, so the presence of Imm data means the completion is for an RDMA Write with Immediate.
2922 if (!send_imm_support) {
2923 entry->opcode = cq_poll_info->imm_valid ?
2924 IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
2927 switch (cq_poll_info->op_type) {
2928 case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
2929 case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
2930 entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2933 entry->opcode = IB_WC_RECV;
2938 * irdma_process_cqe - process cqe info
2939 * @entry: processed cqe
2940 * @cq_poll_info: cqe info
2943 irdma_process_cqe(struct ib_wc *entry,
2944 struct irdma_cq_poll_info *cq_poll_info)
2946 struct irdma_sc_qp *qp;
2948 entry->wc_flags = 0;
2949 entry->pkey_index = 0;
2950 entry->wr_id = cq_poll_info->wr_id;
2952 qp = cq_poll_info->qp_handle;
2953 entry->qp = qp->qp_uk.back_qp;
2955 if (cq_poll_info->error) {
2956 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
2957 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
2959 entry->vendor_err = cq_poll_info->major_err << 16 |
2960 cq_poll_info->minor_err;
2962 entry->status = IB_WC_SUCCESS;
2963 if (cq_poll_info->imm_valid) {
2964 entry->ex.imm_data = htonl(cq_poll_info->imm_data);
2965 entry->wc_flags |= IB_WC_WITH_IMM;
2967 if (cq_poll_info->ud_smac_valid) {
2968 ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
2969 entry->wc_flags |= IB_WC_WITH_SMAC;
2972 if (cq_poll_info->ud_vlan_valid) {
2973 u16 vlan = cq_poll_info->ud_vlan & EVL_VLID_MASK;
2975 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
2977 entry->vlan_id = vlan;
2978 entry->wc_flags |= IB_WC_WITH_VLAN;
2985 if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
2986 set_ib_wc_op_sq(cq_poll_info, entry);
2988 set_ib_wc_op_rq(cq_poll_info, entry,
2989 qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
2991 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
2992 cq_poll_info->stag_invalid_set) {
2993 entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
2994 entry->wc_flags |= IB_WC_WITH_INVALIDATE;
2998 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
2999 entry->src_qp = cq_poll_info->ud_src_qpn;
3002 (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
3003 entry->network_hdr_type = cq_poll_info->ipv4 ?
3007 entry->src_qp = cq_poll_info->qp_id;
3010 entry->byte_len = cq_poll_info->bytes_xfered;
3014 * irdma_poll_one - poll one entry of the CQ
3015 * @ukcq: ukcq to poll
3016 * @cur_cqe: current CQE info to be filled in
3017  * @entry: ib_wc object to be filled for non-extended CQ or NULL for extended CQ
3019 * Returns the internal irdma device error code or 0 on success
3022 irdma_poll_one(struct irdma_cq_uk *ukcq,
3023 struct irdma_cq_poll_info *cur_cqe,
3024 struct ib_wc *entry)
3026 int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
3031 irdma_process_cqe(entry, cur_cqe);
3037 * __irdma_poll_cq - poll cq for completion (kernel apps)
3039 * @num_entries: number of entries to poll
3040 * @entry: wr of a completed entry
3043 __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
3045 struct list_head *tmp_node, *list_node;
3046 struct irdma_cq_buf *last_buf = NULL;
3047 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
3048 struct irdma_cq_buf *cq_buf;
3050 struct irdma_device *iwdev;
3051 struct irdma_cq_uk *ukcq;
3052 bool cq_new_cqe = false;
3053 int resized_bufs = 0;
3056 iwdev = to_iwdev(iwcq->ibcq.device);
3057 ukcq = &iwcq->sc_cq.cq_uk;
3059 /* go through the list of previously resized CQ buffers */
3060 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
3061 cq_buf = container_of(list_node, struct irdma_cq_buf, list);
3062 while (npolled < num_entries) {
3063 ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
3071 /* QP using the CQ is destroyed. Skip reporting this CQE */
3072 if (ret == -EFAULT) {
3079 /* save the resized CQ buffer which received the last cqe */
3085 /* check the current CQ for new cqes */
3086 while (npolled < num_entries) {
3087 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
3088 if (ret == -ENOENT) {
3089 ret = irdma_generated_cmpls(iwcq, cur_cqe);
3091 irdma_process_cqe(entry + npolled, cur_cqe);
3101 /* QP using the CQ is destroyed. Skip reporting this CQE */
3102 if (ret == -EFAULT) {
3110 /* all previous CQ resizes are complete */
3111 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
3113 /* only CQ resizes up to the last_buf are complete */
3114 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
3116 /* report to the HW the number of complete CQ resizes */
3117 irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
3121 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
3122 "%s: Error polling CQ, irdma_err: %d\n",
3129 * irdma_poll_cq - poll cq for completion (kernel apps)
3131 * @num_entries: number of entries to poll
3132 * @entry: wr of a completed entry
3135 irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
3136 struct ib_wc *entry)
3138 struct irdma_cq *iwcq;
3139 unsigned long flags;
3142 iwcq = to_iwcq(ibcq);
3144 spin_lock_irqsave(&iwcq->lock, flags);
3145 ret = __irdma_poll_cq(iwcq, num_entries, entry);
3146 spin_unlock_irqrestore(&iwcq->lock, flags);
3152  * irdma_req_notify_cq - arm cq for kernel application
3154  * @notify_flags: notification flags
3157 irdma_req_notify_cq(struct ib_cq *ibcq,
3158 enum ib_cq_notify_flags notify_flags)
3160 struct irdma_cq *iwcq;
3161 struct irdma_cq_uk *ukcq;
3162 unsigned long flags;
3163 enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
3164 bool promo_event = false;
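/*
 * promo_event re-arms the CQ when a previous solicited-only arm is being
 * promoted to an arm for any completion event.
 */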
3167 iwcq = to_iwcq(ibcq);
3168 ukcq = &iwcq->sc_cq.cq_uk;
3170 spin_lock_irqsave(&iwcq->lock, flags);
3171 if (notify_flags == IB_CQ_SOLICITED) {
3172 cq_notify = IRDMA_CQ_COMPL_SOLICITED;
3174 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED)
3178 if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
3179 iwcq->last_notify = cq_notify;
3180 irdma_uk_cq_request_notification(ukcq, cq_notify);
3183 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3184 (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
3186 spin_unlock_irqrestore(&iwcq->lock, flags);
3192 * mcast_list_add - Add a new mcast item to list
3193 * @rf: RDMA PCI function
3194 * @new_elem: pointer to element to add
3197 mcast_list_add(struct irdma_pci_f *rf,
3198 struct mc_table_list *new_elem)
3200 list_add(&new_elem->list, &rf->mc_qht_list.list);
3204 * mcast_list_del - Remove an mcast item from list
3205 * @mc_qht_elem: pointer to mcast table list element
3208 mcast_list_del(struct mc_table_list *mc_qht_elem)
3211 list_del(&mc_qht_elem->list);
3215 * mcast_list_lookup_ip - Search mcast list for address
3216 * @rf: RDMA PCI function
3217 * @ip_mcast: pointer to mcast IP address
3219 static struct mc_table_list *
3220 mcast_list_lookup_ip(struct irdma_pci_f *rf,
3223 struct mc_table_list *mc_qht_el;
3224 struct list_head *pos, *q;
3226 list_for_each_safe(pos, q, &rf->mc_qht_list.list) {
3227 mc_qht_el = list_entry(pos, struct mc_table_list, list);
3228 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
3229 sizeof(mc_qht_el->mc_info.dest_ip)))
3237 * irdma_mcast_cqp_op - perform a mcast cqp operation
3238 * @iwdev: irdma device
3239 * @mc_grp_ctx: mcast group info
3242 * returns error status
3245 irdma_mcast_cqp_op(struct irdma_device *iwdev,
3246 struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
3248 struct cqp_cmds_info *cqp_info;
3249 struct irdma_cqp_request *cqp_request;
3252 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3256 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
3257 cqp_info = &cqp_request->info;
3258 cqp_info->cqp_cmd = op;
3259 cqp_info->post_sq = 1;
3260 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
3261 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
3262 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3263 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3269 * irdma_attach_mcast - attach a qp to a multicast group
3271 * @ibgid: pointer to global ID
3274 * returns error status
3277 irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
3279 struct irdma_qp *iwqp = to_iwqp(ibqp);
3280 struct irdma_device *iwdev = iwqp->iwdev;
3281 struct irdma_pci_f *rf = iwdev->rf;
3282 struct mc_table_list *mc_qht_elem;
3283 struct irdma_mcast_grp_ctx_entry_info mcg_info = {0};
3284 unsigned long flags;
3285 u32 ip_addr[4] = {0};
3292 struct sockaddr saddr;
3293 struct sockaddr_in saddr_in;
3294 struct sockaddr_in6 saddr_in6;
3296 unsigned char dmac[ETH_ALEN];
3298 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
3300 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
3301 irdma_copy_ip_ntohl(ip_addr,
3302 sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
3303 irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
3305 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
3306 "qp_id=%d, IP6address=%pI6\n",
3309 irdma_mcast_mac_v6(ip_addr, dmac);
3311 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
3313 vlan_id = irdma_get_vlan_ipv4(ip_addr);
3314 irdma_mcast_mac_v4(ip_addr, dmac);
3315 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
3316 "qp_id=%d, IP4address=%pI4, MAC=%pM\n",
3317 ibqp->qp_num, ip_addr, dmac);
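/*
 * Multicast groups are tracked per PCI function in a list keyed by IP
 * address; a group context is created on first attach and holds up to
 * IRDMA_MAX_MGS_PER_CTX member QPs.
 */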
3320 spin_lock_irqsave(&rf->qh_list_lock, flags);
3321 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
3323 struct irdma_dma_mem *dma_mem_mc;
3325 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3326 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
3330 mc_qht_elem->mc_info.ipv4_valid = ipv4;
3331 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
3332 sizeof(mc_qht_elem->mc_info.dest_ip));
3333 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
3334 &mgn, &rf->next_mcg);
3340 mc_qht_elem->mc_info.mgn = mgn;
3341 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
3342 dma_mem_mc->size = sizeof(u64) * IRDMA_MAX_MGS_PER_CTX;
3343 dma_mem_mc->va = irdma_allocate_dma_mem(&rf->hw, dma_mem_mc,
3345 IRDMA_HW_PAGE_SIZE);
3346 if (!dma_mem_mc->va) {
3347 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
3352 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
3353 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
3354 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
3355 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
3356 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
3357 if (vlan_id < VLAN_N_VID)
3358 mc_qht_elem->mc_grp_ctx.vlan_valid = true;
3359 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
3360 mc_qht_elem->mc_grp_ctx.qs_handle =
3361 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
3362 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
3364 spin_lock_irqsave(&rf->qh_list_lock, flags);
3365 mcast_list_add(rf, mc_qht_elem);
3367 if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
3368 IRDMA_MAX_MGS_PER_CTX) {
3369 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3374 mcg_info.qp_id = iwqp->ibqp.qp_num;
3375 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
3376 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
3377 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3379 /* Only if there is a change do we need to modify or create */
3381 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3382 IRDMA_OP_MC_CREATE);
3383 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
3384 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3385 IRDMA_OP_MC_MODIFY);
3396 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
3397 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
3398 mcast_list_del(mc_qht_elem);
3399 irdma_free_dma_mem(&rf->hw,
3400 &mc_qht_elem->mc_grp_ctx.dma_mem_mc);
3401 irdma_free_rsrc(rf, rf->allocated_mcgs,
3402 mc_qht_elem->mc_grp_ctx.mg_id);
3410 * irdma_detach_mcast - detach a qp from a multicast group
3412 * @ibgid: pointer to global ID
3415 * returns error status
3418 irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
3420 struct irdma_qp *iwqp = to_iwqp(ibqp);
3421 struct irdma_device *iwdev = iwqp->iwdev;
3422 struct irdma_pci_f *rf = iwdev->rf;
3423 u32 ip_addr[4] = {0};
3424 struct mc_table_list *mc_qht_elem;
3425 struct irdma_mcast_grp_ctx_entry_info mcg_info = {0};
3427 unsigned long flags;
3429 struct sockaddr saddr;
3430 struct sockaddr_in saddr_in;
3431 struct sockaddr_in6 saddr_in6;
3434 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
3435 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
3436 irdma_copy_ip_ntohl(ip_addr,
3437 sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
3439 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
3441 spin_lock_irqsave(&rf->qh_list_lock, flags);
3442 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
3444 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3445 irdma_debug(&iwdev->rf->sc_dev,
3446 IRDMA_DEBUG_VERBS, "address not found MCG\n");
3450 mcg_info.qp_id = iwqp->ibqp.qp_num;
3451 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
3452 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
3453 mcast_list_del(mc_qht_elem);
3454 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3455 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3456 IRDMA_OP_MC_DESTROY);
3458 irdma_debug(&iwdev->rf->sc_dev,
3459 IRDMA_DEBUG_VERBS, "failed MC_DESTROY MCG\n");
3460 spin_lock_irqsave(&rf->qh_list_lock, flags);
3461 mcast_list_add(rf, mc_qht_elem);
3462 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3466 irdma_free_dma_mem(&rf->hw,
3467 &mc_qht_elem->mc_grp_ctx.dma_mem_mc);
3468 irdma_free_rsrc(rf, rf->allocated_mcgs,
3469 mc_qht_elem->mc_grp_ctx.mg_id);
3472 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3473 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3474 IRDMA_OP_MC_MODIFY);
3476 irdma_debug(&iwdev->rf->sc_dev,
3477 IRDMA_DEBUG_VERBS, "failed Modify MCG\n");
3486 * irdma_query_ah - Query address handle
3487 * @ibah: pointer to address handle
3488 * @ah_attr: address handle attributes
3491 irdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
3493 struct irdma_ah *ah = to_iwah(ibah);
3495 memset(ah_attr, 0, sizeof(*ah_attr));
3496 if (ah->av.attrs.ah_flags & IB_AH_GRH) {
3497 ah_attr->ah_flags = IB_AH_GRH;
3498 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
3499 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
3500 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
3501 ah_attr->grh.sgid_index = ah->sgid_index;
3503 memcpy(&ah_attr->grh.dgid, &ah->dgid,
3504 sizeof(ah_attr->grh.dgid));
3511 irdma_get_netdev(struct ib_device *ibdev, u8 port_num)
3513 struct irdma_device *iwdev = to_iwdev(ibdev);
3515 if (iwdev->netdev) {
3516 dev_hold(iwdev->netdev);
3517 return iwdev->netdev;
3524 irdma_set_device_ops(struct ib_device *ibdev)
3526 struct ib_device *dev_ops = ibdev;
3528 #if __FreeBSD_version >= 1400000
3529 dev_ops->ops.driver_id = RDMA_DRIVER_I40IW;
3530 dev_ops->ops.size_ib_ah = IRDMA_SET_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah);
3531 dev_ops->ops.size_ib_cq = IRDMA_SET_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq);
3532 dev_ops->ops.size_ib_pd = IRDMA_SET_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd);
3533 dev_ops->ops.size_ib_ucontext = IRDMA_SET_RDMA_OBJ_SIZE(ib_ucontext,
3537 #endif /* __FreeBSD_version >= 1400000 */
3538 dev_ops->alloc_hw_stats = irdma_alloc_hw_stats;
3539 dev_ops->alloc_mr = irdma_alloc_mr;
3540 dev_ops->alloc_mw = irdma_alloc_mw;
3541 dev_ops->alloc_pd = irdma_alloc_pd;
3542 dev_ops->alloc_ucontext = irdma_alloc_ucontext;
3543 dev_ops->create_cq = irdma_create_cq;
3544 dev_ops->create_qp = irdma_create_qp;
3545 dev_ops->dealloc_mw = irdma_dealloc_mw;
3546 dev_ops->dealloc_pd = irdma_dealloc_pd;
3547 dev_ops->dealloc_ucontext = irdma_dealloc_ucontext;
3548 dev_ops->dereg_mr = irdma_dereg_mr;
3549 dev_ops->destroy_cq = irdma_destroy_cq;
3550 dev_ops->destroy_qp = irdma_destroy_qp;
3551 dev_ops->disassociate_ucontext = irdma_disassociate_ucontext;
3552 dev_ops->get_dev_fw_str = irdma_get_dev_fw_str;
3553 dev_ops->get_dma_mr = irdma_get_dma_mr;
3554 dev_ops->get_hw_stats = irdma_get_hw_stats;
3555 dev_ops->get_netdev = irdma_get_netdev;
3556 dev_ops->map_mr_sg = irdma_map_mr_sg;
3557 dev_ops->mmap = irdma_mmap;
3558 #if __FreeBSD_version >= 1400026
3559 dev_ops->mmap_free = irdma_mmap_free;
3561 dev_ops->poll_cq = irdma_poll_cq;
3562 dev_ops->post_recv = irdma_post_recv;
3563 dev_ops->post_send = irdma_post_send;
3564 dev_ops->query_device = irdma_query_device;
3565 dev_ops->query_port = irdma_query_port;
3566 dev_ops->modify_port = irdma_modify_port;
3567 dev_ops->query_qp = irdma_query_qp;
3568 dev_ops->reg_user_mr = irdma_reg_user_mr;
3569 dev_ops->rereg_user_mr = irdma_rereg_user_mr;
3570 dev_ops->req_notify_cq = irdma_req_notify_cq;
3571 dev_ops->resize_cq = irdma_resize_cq;
3575 irdma_set_device_mcast_ops(struct ib_device *ibdev)
3577 struct ib_device *dev_ops = ibdev;
3578 dev_ops->attach_mcast = irdma_attach_mcast;
3579 dev_ops->detach_mcast = irdma_detach_mcast;
3583 irdma_set_device_roce_ops(struct ib_device *ibdev)
3585 struct ib_device *dev_ops = ibdev;
3586 dev_ops->create_ah = irdma_create_ah;
3587 dev_ops->destroy_ah = irdma_destroy_ah;
3588 dev_ops->get_link_layer = irdma_get_link_layer;
3589 dev_ops->get_port_immutable = irdma_roce_port_immutable;
3590 dev_ops->modify_qp = irdma_modify_qp_roce;
3591 dev_ops->query_ah = irdma_query_ah;
3592 dev_ops->query_gid = irdma_query_gid_roce;
3593 dev_ops->query_pkey = irdma_query_pkey;
3594 ibdev->add_gid = irdma_add_gid;
3595 ibdev->del_gid = irdma_del_gid;
3599 irdma_set_device_iw_ops(struct ib_device *ibdev)
3601 struct ib_device *dev_ops = ibdev;
3603 ibdev->uverbs_cmd_mask |=
3604 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
3605 (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
3607 dev_ops->create_ah = irdma_create_ah_stub;
3608 dev_ops->destroy_ah = irdma_destroy_ah_stub;
3609 dev_ops->get_port_immutable = irdma_iw_port_immutable;
3610 dev_ops->modify_qp = irdma_modify_qp;
3611 dev_ops->query_gid = irdma_query_gid;
3612 dev_ops->query_pkey = irdma_iw_query_pkey;
3616 irdma_set_device_gen1_ops(struct ib_device *ibdev)
3621 * irdma_init_roce_device - initialization of roce rdma device
3622 * @iwdev: irdma device
3625 irdma_init_roce_device(struct irdma_device *iwdev)
3627 kc_set_roce_uverbs_cmd_mask(iwdev);
3628 iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
3629 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
3630 if_getlladdr(iwdev->netdev));
3631 irdma_set_device_roce_ops(&iwdev->ibdev);
3632 if (iwdev->rf->rdma_ver == IRDMA_GEN_2)
3633 irdma_set_device_mcast_ops(&iwdev->ibdev);
3637 * irdma_init_iw_device - initialization of iwarp rdma device
3638 * @iwdev: irdma device
3641 irdma_init_iw_device(struct irdma_device *iwdev)
3643 if_t netdev = iwdev->netdev;
3645 iwdev->ibdev.node_type = RDMA_NODE_RNIC;
3646 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
3647 if_getlladdr(netdev));
3648 iwdev->ibdev.iwcm = kzalloc(sizeof(*iwdev->ibdev.iwcm), GFP_KERNEL);
3649 if (!iwdev->ibdev.iwcm)
3652 iwdev->ibdev.iwcm->add_ref = irdma_qp_add_ref;
3653 iwdev->ibdev.iwcm->rem_ref = irdma_qp_rem_ref;
3654 iwdev->ibdev.iwcm->get_qp = irdma_get_qp;
3655 iwdev->ibdev.iwcm->connect = irdma_connect;
3656 iwdev->ibdev.iwcm->accept = irdma_accept;
3657 iwdev->ibdev.iwcm->reject = irdma_reject;
3658 iwdev->ibdev.iwcm->create_listen = irdma_create_listen;
3659 iwdev->ibdev.iwcm->destroy_listen = irdma_destroy_listen;
3660 memcpy(iwdev->ibdev.iwcm->ifname, if_name(netdev),
3661 sizeof(iwdev->ibdev.iwcm->ifname));
3662 irdma_set_device_iw_ops(&iwdev->ibdev);
3668 * irdma_init_rdma_device - initialization of rdma device
3669 * @iwdev: irdma device
3672 irdma_init_rdma_device(struct irdma_device *iwdev)
3676 iwdev->ibdev.owner = THIS_MODULE;
3677 iwdev->ibdev.uverbs_abi_ver = IRDMA_ABI_VER;
3678 kc_set_rdma_uverbs_cmd_mask(iwdev);
3680 if (iwdev->roce_mode) {
3681 irdma_init_roce_device(iwdev);
3683 ret = irdma_init_iw_device(iwdev);
3688 iwdev->ibdev.phys_port_cnt = 1;
3689 iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
3690 iwdev->ibdev.dev.parent = iwdev->rf->dev_ctx.dev;
3691 set_ibdev_dma_device(iwdev->ibdev, &iwdev->rf->pcidev->dev);
3692 irdma_set_device_ops(&iwdev->ibdev);
3693 if (iwdev->rf->rdma_ver == IRDMA_GEN_1)
3694 irdma_set_device_gen1_ops(&iwdev->ibdev);
3700 * irdma_port_ibevent - indicate port event
3701 * @iwdev: irdma device
3704 irdma_port_ibevent(struct irdma_device *iwdev)
3706 struct ib_event event;
3708 event.device = &iwdev->ibdev;
3709 event.element.port_num = 1;
3711 iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3712 ib_dispatch_event(&event);
3716 * irdma_ib_unregister_device - unregister rdma device from IB
3718 * @iwdev: irdma device
3721 irdma_ib_unregister_device(struct irdma_device *iwdev)
3723 iwdev->iw_status = 0;
3724 irdma_port_ibevent(iwdev);
3725 ib_unregister_device(&iwdev->ibdev);
3726 dev_put(iwdev->netdev);
3727 kfree(iwdev->ibdev.iwcm);
3728 iwdev->ibdev.iwcm = NULL;
3732 * irdma_ib_register_device - register irdma device to IB core
3733 * @iwdev: irdma device
3736 irdma_ib_register_device(struct irdma_device *iwdev)
3740 ret = irdma_init_rdma_device(iwdev);
3744 dev_hold(iwdev->netdev);
3745 sprintf(iwdev->ibdev.name, "irdma-%s", if_name(iwdev->netdev));
3746 ret = ib_register_device(&iwdev->ibdev, NULL);
3750 iwdev->iw_status = 1;
3751 irdma_port_ibevent(iwdev);
3756 kfree(iwdev->ibdev.iwcm);
3757 iwdev->ibdev.iwcm = NULL;
3758 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "Register RDMA device fail\n");