 * Copyright (c) 2018-2019 Cavium, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qlnxr_def.h"
#include "rdma_common.h"
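
/*
 * qlnxr_inc_sw_gsi_cons() - advance the software GSI consumer index of a
 * work queue, wrapping at max_wr.
 */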
qlnxr_inc_sw_gsi_cons(struct qlnxr_qp_hwq_info *info)
        info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
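
/*
 * qlnxr_store_gsi_qp_cq() - remember the GSI QP's send and receive CQs in
 * the device softc and mark the GSI QP as created.
 */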
qlnxr_store_gsi_qp_cq(struct qlnxr_dev *dev,
    struct ib_qp_init_attr *attrs)
        QL_DPRINT12(dev->ha, "enter\n");

        dev->gsi_qp_created = 1;
        dev->gsi_sqcq = get_qlnxr_cq((attrs->send_cq));
        dev->gsi_rqcq = get_qlnxr_cq((attrs->recv_cq));

        QL_DPRINT12(dev->ha, "exit\n");
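
/*
 * qlnxr_ll2_complete_tx_packet() - ECORE LL2 TX completion callback for the
 * GSI QP: frees the DMA-coherent UD header buffer, advances the SQ GSI
 * consumer under the QP lock and invokes the send CQ completion handler.
 */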
qlnxr_ll2_complete_tx_packet(void *cxt,
    uint8_t connection_handle,
    dma_addr_t first_frag_addr,
        struct qlnxr_dev *dev = (struct qlnxr_dev *)cxt;
        struct ecore_roce_ll2_packet *pkt = cookie;
        struct qlnxr_cq *cq = dev->gsi_sqcq;
        struct qlnxr_qp *qp = dev->gsi_qp;

        QL_DPRINT12(dev->ha, "enter\n");

        qlnx_dma_free_coherent(&dev->ha->cdev, pkt->header.vaddr,
            pkt->header.baddr, pkt->header.len);

        spin_lock_irqsave(&qp->q_lock, flags);

        qlnxr_inc_sw_gsi_cons(&qp->sq);

        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);

        QL_DPRINT12(dev->ha, "exit\n");
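
/*
 * qlnxr_ll2_complete_rx_packet() - ECORE LL2 RX completion callback for the
 * GSI QP: records the completion status, VLAN, data length and source MAC of
 * the received MAD in the QP's rqe_wr_id ring, advances the RQ GSI consumer
 * and invokes the receive CQ completion handler.
 */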
qlnxr_ll2_complete_rx_packet(void *cxt,
    struct ecore_ll2_comp_rx_data *data)
        struct qlnxr_dev *dev = (struct qlnxr_dev *)cxt;
        struct qlnxr_cq *cq = dev->gsi_rqcq;
        // struct qlnxr_qp *qp = dev->gsi_qp;
        struct qlnxr_qp *qp = NULL;
        // uint32_t delay_count = 0, gsi_cons = 0;

        QL_DPRINT12(dev->ha, "enter\n");

        if (data->u.data_length_error) {
                /* TODO: add statistic */

        if (data->cookie == NULL) {
                QL_DPRINT12(dev->ha, "cookie is NULL, bad sign\n");

        qp_num = (0xFF << 16) | data->qp_id;

        if (data->qp_id == 1) {
                /* TODO: This will be needed for UD QP support */
                /* For RoCEv1 this is invalid */
                QL_DPRINT12(dev->ha, "invalid QP\n");

        /* note: currently only one recv sg is supported */
        QL_DPRINT12(dev->ha, "MAD received on QP : %x\n", data->rx_buf_addr);

        spin_lock_irqsave(&qp->q_lock, flags);

        qp->rqe_wr_id[qp->rq.gsi_cons].rc =
            data->u.data_length_error ? -EINVAL : 0;
        qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
        /* note: length stands for data length i.e. GRH is excluded */
        qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
            data->length.data_length;
        *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
            ntohl(data->opaque_data_0);
        *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
            ntohs((u16)data->opaque_data_1);

        qlnxr_inc_sw_gsi_cons(&qp->rq);

        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);

        QL_DPRINT12(dev->ha, "exit\n");
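
/*
 * qlnxr_ll2_release_rx_packet() - ECORE LL2 RX buffer release callback for
 * the GSI connection.
 */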
void qlnxr_ll2_release_rx_packet(void *cxt,
    u8 connection_handle,
    dma_addr_t rx_buf_addr,
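
/*
 * qlnxr_destroy_gsi_cq() - destroy the firmware CQ backing the GSI QP's send
 * CQ and free its PBL chain; the recv_cq is torn down separately only when
 * it is distinct from the send_cq.
 */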
qlnxr_destroy_gsi_cq(struct qlnxr_dev *dev,
    struct ib_qp_init_attr *attrs)
        struct ecore_rdma_destroy_cq_in_params iparams;
        struct ecore_rdma_destroy_cq_out_params oparams;

        QL_DPRINT12(dev->ha, "enter\n");

        cq = get_qlnxr_cq((attrs->send_cq));
        iparams.icid = cq->icid;
        ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
        ecore_chain_free(&dev->ha->cdev, &cq->pbl);

        cq = get_qlnxr_cq((attrs->recv_cq));
        /* if a dedicated recv_cq was used, delete it too */
        if (iparams.icid != cq->icid) {
                iparams.icid = cq->icid;
                ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
                ecore_chain_free(&dev->ha->cdev, &cq->pbl);

        QL_DPRINT12(dev->ha, "exit\n");
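
/*
 * qlnxr_check_gsi_qp_attrs() - validate that the requested GSI QP capacities
 * do not exceed the driver limits for receive SGEs, receive WRs and send WRs.
 */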
qlnxr_check_gsi_qp_attrs(struct qlnxr_dev *dev,
    struct ib_qp_init_attr *attrs)
        QL_DPRINT12(dev->ha, "enter\n");

        if (attrs->cap.max_recv_sge > QLNXR_GSI_MAX_RECV_SGE) {
                    "(attrs->cap.max_recv_sge > QLNXR_GSI_MAX_RECV_SGE)\n");

        if (attrs->cap.max_recv_wr > QLNXR_GSI_MAX_RECV_WR) {
                    "(attrs->cap.max_recv_wr > QLNXR_GSI_MAX_RECV_WR)\n");

        if (attrs->cap.max_send_wr > QLNXR_GSI_MAX_SEND_WR) {
                    "(attrs->cap.max_send_wr > QLNXR_GSI_MAX_SEND_WR)\n");

        QL_DPRINT12(dev->ha, "exit\n");
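
/*
 * qlnxr_ll2_post_tx() - post a GSI packet on the LL2 connection: the UD
 * header is sent as the first fragment and each payload SGE is added as an
 * additional fragment of the same LL2 TX packet.
 */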
qlnxr_ll2_post_tx(struct qlnxr_dev *dev, struct ecore_roce_ll2_packet *pkt)
        enum ecore_ll2_roce_flavor_type roce_flavor;
        struct ecore_ll2_tx_pkt_info ll2_tx_pkt;

        QL_DPRINT12(dev->ha, "enter\n");

        memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));

        if (pkt->roce_mode != ROCE_V1) {
                QL_DPRINT11(dev->ha, "roce_mode != ROCE_V1\n");

        roce_flavor = (pkt->roce_mode == ROCE_V1) ?
            ECORE_LL2_ROCE : ECORE_LL2_RROCE;

        ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
        ll2_tx_pkt.vlan = 0; /* ??? */
        ll2_tx_pkt.tx_dest = ECORE_LL2_TX_DEST_NW;
        ll2_tx_pkt.ecore_roce_flavor = roce_flavor;
        ll2_tx_pkt.first_frag = pkt->header.baddr;
        ll2_tx_pkt.first_frag_len = pkt->header.len;
        ll2_tx_pkt.cookie = pkt;
        ll2_tx_pkt.enable_ip_cksum = 1; // Only for RoCEv2:IPv4

        rc = ecore_ll2_prepare_tx_packet(dev->rdma_ctx,
                QL_DPRINT11(dev->ha, "ecore_ll2_prepare_tx_packet failed\n");

                /* TX failed while posting header - release resources */
                qlnx_dma_free_coherent(&dev->ha->cdev,

        for (i = 0; i < pkt->n_seg; i++) {
                rc = ecore_ll2_set_fragment_of_tx_packet(dev->rdma_ctx,
                    pkt->payload[i].baddr,
                    pkt->payload[i].len);
                        /* if failed not much to do here, partial packet has
                         * been posted we can't free memory, will need to wait
                            "ecore_ll2_set_fragment_of_tx_packet failed\n");

        struct ecore_ll2_stats stats = {0};
        rc = ecore_ll2_get_stats(dev->rdma_ctx, dev->gsi_ll2_handle, &stats);
                QL_DPRINT11(dev->ha, "failed to obtain ll2 stats\n");

        QL_DPRINT12(dev->ha, "exit\n");
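
/*
 * qlnxr_ll2_stop() - tear down the GSI LL2 connection: remove the MAC
 * filter, terminate and release the LL2 connection and invalidate the handle.
 */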
qlnxr_ll2_stop(struct qlnxr_dev *dev)
        QL_DPRINT12(dev->ha, "enter\n");

        if (dev->gsi_ll2_handle == 0xFF)

        /* remove LL2 MAC address filter */
        rc = qlnx_rdma_ll2_set_mac_filter(dev->rdma_ctx,
            dev->gsi_ll2_mac_address, NULL);

        rc = ecore_ll2_terminate_connection(dev->rdma_ctx,
            dev->gsi_ll2_handle);

        ecore_ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

        dev->gsi_ll2_handle = 0xFF;

        QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
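
/*
 * qlnxr_ll2_start() - acquire and establish the LL2 connection used for GSI
 * traffic and install the MAC filter; on failure the connection is
 * terminated and released again.
 */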
int qlnxr_ll2_start(struct qlnxr_dev *dev,
    struct ib_qp_init_attr *attrs,
        struct ecore_ll2_acquire_data data;
        struct ecore_ll2_cbs cbs;

        QL_DPRINT12(dev->ha, "enter\n");

        /* configure and start LL2 */
        cbs.rx_comp_cb = qlnxr_ll2_complete_rx_packet;
        cbs.tx_comp_cb = qlnxr_ll2_complete_tx_packet;
        cbs.rx_release_cb = qlnxr_ll2_release_rx_packet;
        cbs.tx_release_cb = qlnxr_ll2_complete_tx_packet;

        dev->gsi_ll2_handle = 0xFF;

        memset(&data, 0, sizeof(data));
        data.input.conn_type = ECORE_LL2_TYPE_ROCE;
        data.input.mtu = dev->ha->ifp->if_mtu;
        data.input.rx_num_desc = 8 * 1024;
        data.input.rx_drop_ttl0_flg = 1;
        data.input.rx_vlan_removal_en = 0;
        data.input.tx_num_desc = 8 * 1024;
        data.input.tx_tc = 0;
        data.input.tx_dest = ECORE_LL2_TX_DEST_NW;
        data.input.ai_err_packet_too_big = ECORE_LL2_DROP_PACKET;
        data.input.ai_err_no_buf = ECORE_LL2_DROP_PACKET;
        data.input.gsi_enable = 1;
        data.p_connection_handle = &dev->gsi_ll2_handle;

        rc = ecore_ll2_acquire_connection(dev->rdma_ctx, &data);
                    "ecore_ll2_acquire_connection failed: %d\n",

            "ll2 connection acquired successfully\n");
        rc = ecore_ll2_establish_connection(dev->rdma_ctx,
            dev->gsi_ll2_handle);
367 "ecore_ll2_establish_connection failed\n", rc);
372 "ll2 connection established successfully\n");
373 rc = qlnx_rdma_ll2_set_mac_filter(dev->rdma_ctx, NULL,
374 dev->ha->primary_mac);
                QL_DPRINT11(dev->ha,
                    "qlnx_rdma_ll2_set_mac_filter failed rc = %d\n", rc);
        QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);

        ecore_ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

        ecore_ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

        QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
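
/*
 * qlnxr_create_gsi_qp() - create the GSI (QP1) queue pair: validate the
 * attributes, start the LL2 connection, allocate the software send/receive
 * work-request tracking arrays, record the GSI CQs and destroy the firmware
 * CQs, since GSI completions are handled entirely by the driver.
 */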
qlnxr_create_gsi_qp(struct qlnxr_dev *dev,
    struct ib_qp_init_attr *attrs,
        QL_DPRINT12(dev->ha, "enter\n");

        rc = qlnxr_check_gsi_qp_attrs(dev, attrs);
                QL_DPRINT11(dev->ha, "qlnxr_check_gsi_qp_attrs failed\n");

        rc = qlnxr_ll2_start(dev, attrs, qp);
                QL_DPRINT11(dev->ha, "qlnxr_ll2_start failed\n");

        qp->rq.max_wr = attrs->cap.max_recv_wr;
        qp->sq.max_wr = attrs->cap.max_send_wr;

        qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
        if (!qp->rqe_wr_id) {
                QL_DPRINT11(dev->ha, "(!qp->rqe_wr_id)\n");

        qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
        if (!qp->wqe_wr_id) {
                QL_DPRINT11(dev->ha, "(!qp->wqe_wr_id)\n");

        qlnxr_store_gsi_qp_cq(dev, qp, attrs);
        memcpy(dev->gsi_ll2_mac_address, dev->ha->primary_mac, ETH_ALEN);

        /* the GSI CQ is handled by the driver so remove it from the FW */
        qlnxr_destroy_gsi_cq(dev, attrs);
        dev->gsi_rqcq->cq_type = QLNXR_CQ_TYPE_GSI;
        dev->gsi_sqcq->cq_type = QLNXR_CQ_TYPE_GSI;
        QL_DPRINT12(dev->ha, "exit &qp->ibqp = %p\n", &qp->ibqp);

        kfree(qp->rqe_wr_id);

        rc = qlnxr_ll2_stop(dev);

        QL_DPRINT12(dev->ha, "exit with error\n");

        return ERR_PTR(-ENOMEM);
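
/*
 * qlnxr_destroy_gsi_qp() - tear down the GSI QP by stopping its LL2
 * connection.
 */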
qlnxr_destroy_gsi_qp(struct qlnxr_dev *dev)
        QL_DPRINT12(dev->ha, "enter\n");

        rc = qlnxr_ll2_stop(dev);

        QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
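
/*
 * qlnxr_get_vlan_id_gsi() - extract the VLAN id embedded in bytes 11-12 of
 * the destination GID and report whether a valid (< 0x1000) VLAN is present.
 */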
qlnxr_get_vlan_id_gsi(struct ib_ah_attr *ah_attr, u16 *vlan_id)
        union ib_gid *dgid = &ah_attr->grh.dgid;

        tmp_vlan_id = (dgid->raw[11] << 8) | dgid->raw[12];
        if (tmp_vlan_id < 0x1000) {
                *vlan_id = tmp_vlan_id;

#define QLNXR_MAX_UD_HEADER_SIZE (100)
#define QLNXR_GSI_QPN (1)
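
/*
 * qlnxr_gsi_build_header() - build the Ethernet/VLAN, GRH (or IPv4 for
 * RoCEv2), BTH and DETH headers for a GSI send WR into an ib_ud_header, and
 * report the RoCE mode used.
 */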
qlnxr_gsi_build_header(struct qlnxr_dev *dev,
    const struct ib_send_wr *swr,
    struct ib_ud_header *udh,
        bool has_vlan = false, has_grh_ipv6 = true;
        struct ib_ah_attr *ah_attr = &get_qlnxr_ah((ud_wr(swr)->ah))->attr;
        struct ib_global_route *grh = &ah_attr->grh;

#if __FreeBSD_version >= 1102000
        bool has_udp = false;
#endif /* #if __FreeBSD_version >= 1102000 */

#if !DEFINE_IB_AH_ATTR_WITH_DMAC

        for (i = 0; i < swr->num_sge; ++i)
                send_size += swr->sg_list[i].length;

        has_vlan = qlnxr_get_vlan_id_gsi(ah_attr, &vlan_id);
        ether_type = ETH_P_ROCE;
        *roce_mode = ROCE_V1;
        if (grh->sgid_index < QLNXR_MAX_SGID)
                sgid = dev->sgid_tbl[grh->sgid_index];
                sgid = dev->sgid_tbl[0];

#if __FreeBSD_version >= 1102000

        rc = ib_ud_header_init(send_size, false /* LRH */, true /* ETH */,
            has_vlan, has_grh_ipv6, ip_ver, has_udp,
            0 /* immediate */, udh);
                QL_DPRINT11(dev->ha, "gsi post send: failed to init header\n");

        ib_ud_header_init(send_size, false /* LRH */, true /* ETH */,
            has_vlan, has_grh_ipv6, 0 /* immediate */, udh);

#endif /* #if __FreeBSD_version >= 1102000 */

        /* ENET + VLAN headers */
#if DEFINE_IB_AH_ATTR_WITH_DMAC
        memcpy(udh->eth.dmac_h, ah_attr->dmac, ETH_ALEN);
        qlnxr_get_dmac(dev, ah_attr, mac);
        memcpy(udh->eth.dmac_h, mac, ETH_ALEN);

        memcpy(udh->eth.smac_h, dev->ha->primary_mac, ETH_ALEN);

                udh->eth.type = htons(ETH_P_8021Q);
                udh->vlan.tag = htons(vlan_id);
                udh->vlan.type = htons(ether_type);

                udh->eth.type = htons(ether_type);

        for (int j = 0; j < 4; j++) {
                QL_DPRINT12(dev->ha, "destination mac: %x\n",

        for (int j = 0; j < 4; j++) {
                QL_DPRINT12(dev->ha, "source mac: %x\n",

        QL_DPRINT12(dev->ha, "QP: %p, opcode: %d, wq: %lx, roce: %x, hops:%d,"
            "imm : %d, vlan :%d, AH: %p\n",
            qp, swr->opcode, swr->wr_id, *roce_mode, grh->hop_limit,
            0, has_vlan, get_qlnxr_ah((ud_wr(swr)->ah)));

        /* GRH / IPv6 header */
        udh->grh.traffic_class = grh->traffic_class;
        udh->grh.flow_label = grh->flow_label;
        udh->grh.hop_limit = grh->hop_limit;
        udh->grh.destination_gid = grh->dgid;
        memcpy(&udh->grh.source_gid.raw, &sgid.raw,
            sizeof(udh->grh.source_gid.raw));
        QL_DPRINT12(dev->ha, "header: tc: %x, flow_label : %x, "
            "hop_limit: %x \n", udh->grh.traffic_class,
            udh->grh.flow_label, udh->grh.hop_limit);

        for (i = 0; i < 16; i++) {
                QL_DPRINT12(dev->ha, "udh dgid = %x\n", udh->grh.destination_gid.raw[i]);

        for (i = 0; i < 16; i++) {
                QL_DPRINT12(dev->ha, "udh sgid = %x\n", udh->grh.source_gid.raw[i]);

        udh->grh.next_header = 0x1b;

#ifdef DEFINE_IB_UD_HEADER_INIT_UDP_PRESENT
        /* This is for RoCEv2 */
        udh->ip4.protocol = IPPROTO_UDP;
        udh->ip4.tos = htonl(grh->flow_label);
        udh->ip4.frag_off = htons(IP_DF);
        udh->ip4.ttl = grh->hop_limit;

        ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
        udh->ip4.saddr = ipv4_addr;
        ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
        udh->ip4.daddr = ipv4_addr;
        /* note: checksum is calculated by the device */

        udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
        udh->bth.pkey = QLNXR_ROCE_PKEY_DEFAULT; /* TODO: ib_get_cached_pkey?! */
        //udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
        udh->bth.destination_qpn = OSAL_CPU_TO_BE32(ud_wr(swr)->remote_qpn);
        //udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
        udh->bth.psn = OSAL_CPU_TO_BE32((qp->sq_psn++) & ((1 << 24) - 1));
        udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

        //udh->deth.qkey = htonl(0x80010000); /* qp->qkey */ /* TODO: what is?! */
        //udh->deth.source_qpn = htonl(QLNXR_GSI_QPN);
        udh->deth.qkey = OSAL_CPU_TO_BE32(0x80010000); /* qp->qkey */ /* TODO: what is?! */
        udh->deth.source_qpn = OSAL_CPU_TO_BE32(QLNXR_GSI_QPN);

        QL_DPRINT12(dev->ha, "exit\n");
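
/*
 * qlnxr_gsi_build_packet() - build an ecore_roce_ll2_packet for a GSI send
 * WR: pack the UD header into a DMA-coherent buffer, choose between network
 * and loopback TX destination by comparing source and destination MACs, and
 * attach the WR's SGEs as payload fragments.
 */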
qlnxr_gsi_build_packet(struct qlnxr_dev *dev,
    struct qlnxr_qp *qp, const struct ib_send_wr *swr,
    struct ecore_roce_ll2_packet **p_packet)
        u8 ud_header_buffer[QLNXR_MAX_UD_HEADER_SIZE];
        struct ecore_roce_ll2_packet *packet;
        int roce_mode, header_size;
        struct ib_ud_header udh;

        QL_DPRINT12(dev->ha, "enter\n");

        rc = qlnxr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
                    "qlnxr_gsi_build_header failed rc = %d\n", rc);

        header_size = ib_ud_header_pack(&udh, &ud_header_buffer);

        packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
                QL_DPRINT11(dev->ha, "packet == NULL\n");

        packet->header.vaddr = qlnx_dma_alloc_coherent(&dev->ha->cdev,
            &packet->header.baddr,
        if (!packet->header.vaddr) {
                QL_DPRINT11(dev->ha, "packet->header.vaddr == NULL\n");

        if (memcmp(udh.eth.smac_h, udh.eth.dmac_h, ETH_ALEN))
                packet->tx_dest = ECORE_ROCE_LL2_TX_DEST_NW;
                packet->tx_dest = ECORE_ROCE_LL2_TX_DEST_LB;

        packet->roce_mode = roce_mode;
        memcpy(packet->header.vaddr, ud_header_buffer, header_size);
        packet->header.len = header_size;
        packet->n_seg = swr->num_sge;

        qp->wqe_wr_id[qp->sq.prod].bytes_len = IB_GRH_BYTES; //RDMA_GRH_BYTES
        for (i = 0; i < packet->n_seg; i++) {
                packet->payload[i].baddr = swr->sg_list[i].addr;
                packet->payload[i].len = swr->sg_list[i].length;
                qp->wqe_wr_id[qp->sq.prod].bytes_len +=
                    packet->payload[i].len;
                QL_DPRINT11(dev->ha, "baddr: %p, len: %d\n",
                    packet->payload[i].baddr,
                    packet->payload[i].len);

        QL_DPRINT12(dev->ha, "exit, packet->n_seg: %d\n", packet->n_seg);
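
/*
 * qlnxr_gsi_post_send() - post-send verb for the GSI QP: validates QP state,
 * SGE count and opcode, builds the LL2 packet and posts it, then records the
 * WR in the software send queue and advances the SQ producer.
 */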
qlnxr_gsi_post_send(struct ib_qp *ibqp,
    const struct ib_send_wr *wr,
    const struct ib_send_wr **bad_wr)
        struct ecore_roce_ll2_packet *pkt = NULL;
        struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
        struct qlnxr_dev *dev = qp->dev;

        QL_DPRINT12(dev->ha, "enter\n");

        if (qp->state != ECORE_ROCE_QP_STATE_RTS) {
                    "(qp->state != ECORE_ROCE_QP_STATE_RTS)\n");

        if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
                    "(wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE)\n");

        if (wr->opcode != IB_WR_SEND) {
                QL_DPRINT11(dev->ha, "(wr->opcode != IB_WR_SEND)\n");

        spin_lock_irqsave(&qp->q_lock, flags);

        rc = qlnxr_gsi_build_packet(dev, qp, wr, &pkt);
                spin_unlock_irqrestore(&qp->q_lock, flags);
                QL_DPRINT11(dev->ha, "qlnxr_gsi_build_packet failed\n");

        rc = qlnxr_ll2_post_tx(dev, pkt);

                qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
                qp->wqe_wr_id[qp->sq.prod].signaled =
                    !!(wr->send_flags & IB_SEND_SIGNALED);
                qp->wqe_wr_id[qp->sq.prod].opcode = IB_WC_SEND;
                qlnxr_inc_sw_prod(&qp->sq);
                QL_DPRINT11(dev->ha, "packet sent over gsi qp\n");

                QL_DPRINT11(dev->ha, "qlnxr_ll2_post_tx failed\n");

        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (wr->next != NULL) {

        QL_DPRINT12(dev->ha, "exit\n");

        QL_DPRINT12(dev->ha, "exit error\n");

#define QLNXR_LL2_RX_BUFFER_SIZE (4 * 1024)
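
/*
 * qlnxr_gsi_post_recv() - post-recv verb for the GSI QP: validates QP state
 * and SGE count, hands the receive buffer to the LL2 layer and records the
 * WR in the software receive queue before advancing the RQ producer.
 */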
qlnxr_gsi_post_recv(struct ib_qp *ibqp,
    const struct ib_recv_wr *wr,
    const struct ib_recv_wr **bad_wr)
        struct qlnxr_dev *dev = get_qlnxr_dev((ibqp->device));
        struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);

        QL_DPRINT12(dev->ha, "enter, wr: %p\n", wr);

        if ((qp->state != ECORE_ROCE_QP_STATE_RTR) &&
            (qp->state != ECORE_ROCE_QP_STATE_RTS)) {
                QL_DPRINT11(dev->ha, "exit 0\n");

        spin_lock_irqsave(&qp->q_lock, flags);

        if (wr->num_sge > QLNXR_GSI_MAX_RECV_SGE) {
                QL_DPRINT11(dev->ha, "exit 1\n");

        rc = ecore_ll2_post_rx_buffer(dev->rdma_ctx,
            wr->sg_list[0].length,
                QL_DPRINT11(dev->ha, "exit 2\n");

        memset(&qp->rqe_wr_id[qp->rq.prod], 0,
            sizeof(qp->rqe_wr_id[qp->rq.prod]));
        qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
        qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;

        qlnxr_inc_sw_prod(&qp->rq);

        spin_unlock_irqrestore(&qp->q_lock, flags);

        QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);

        spin_unlock_irqrestore(&qp->q_lock, flags);

        QL_DPRINT12(dev->ha, "exit with -ENOMEM\n");
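
/*
 * qlnxr_gsi_poll_cq() - poll the GSI CQ: under the CQ lock, first drain
 * completed receives (filling status, byte length, VLAN and SMAC from the
 * rqe_wr_id ring), then drain completed sends from the wqe_wr_id ring, and
 * return the number of work completions written.
 */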
qlnxr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        struct qlnxr_dev *dev = get_qlnxr_dev((ibcq->device));
        struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
        struct qlnxr_qp *qp = dev->gsi_qp;

        QL_DPRINT12(dev->ha, "enter\n");

        spin_lock_irqsave(&cq->cq_lock, flags);

        while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
                memset(&wc[i], 0, sizeof(*wc));

                wc[i].qp = &qp->ibqp;
                wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
                wc[i].opcode = IB_WC_RECV;
                wc[i].pkey_index = 0;
                wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
                    IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
                /* 0 - currently only one recv sg is supported */
                wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
                wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;

#if __FreeBSD_version >= 1100000
                memcpy(&wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac, ETH_ALEN);
                wc[i].wc_flags |= IB_WC_WITH_SMAC;

                if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
                        wc[i].wc_flags |= IB_WC_WITH_VLAN;
                        wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;

                qlnxr_inc_sw_cons(&qp->rq);

        while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
                memset(&wc[i], 0, sizeof(*wc));

                wc[i].qp = &qp->ibqp;
                wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
                wc[i].opcode = IB_WC_SEND;
                wc[i].status = IB_WC_SUCCESS;

                qlnxr_inc_sw_cons(&qp->sq);

        spin_unlock_irqrestore(&cq->cq_lock, flags);

        QL_DPRINT12(dev->ha, "exit i = %d\n", i);