/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "en.h"
#include <machine/in_cksum.h>
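
/*
 * Allocate an mbuf (or an mbuf chain, when MLX5E_MAX_RX_SEGS != 1) for
 * the receive WQE at index "ix", load it for DMA and fill the WQE
 * scatter list with the resulting segment addresses and lengths.
 * Returns 0 on success or a negative errno value on failure.
 */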
static int
mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
    struct mlx5e_rx_wqe *wqe, u16 ix)
{
	bus_dma_segment_t segs[rq->nsegs];
	struct mbuf *mb;
	int nsegs;
	int err;
#if (MLX5E_MAX_RX_SEGS != 1)
	struct mbuf *mb_head;
	int i;
#endif
	if (rq->mbuf[ix].mbuf != NULL)
		return (0);

#if (MLX5E_MAX_RX_SEGS == 1)
	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);
	if (unlikely(mb == NULL))
		return (-ENOMEM);

	mb->m_pkthdr.len = mb->m_len = rq->wqe_sz;
#else
	mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    MLX5E_MAX_RX_BYTES);
	if (unlikely(mb == NULL))
		return (-ENOMEM);

	mb->m_len = MLX5E_MAX_RX_BYTES;
	mb->m_pkthdr.len = MLX5E_MAX_RX_BYTES;

	for (i = 1; i < rq->nsegs; i++) {
		if (mb_head->m_pkthdr.len >= rq->wqe_sz)
			break;
		mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0,
		    MLX5E_MAX_RX_BYTES);
		if (unlikely(mb == NULL)) {
			m_freem(mb_head);
			return (-ENOMEM);
		}
		mb->m_len = MLX5E_MAX_RX_BYTES;
		mb_head->m_pkthdr.len += MLX5E_MAX_RX_BYTES;
	}
	/* rewind to first mbuf in chain */
	mb = mb_head;
#endif
	/* get IP header aligned */
	m_adj(mb, MLX5E_NET_IP_ALIGN);

	err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err != 0)
		goto err_free_mbuf;
	if (unlikely(nsegs == 0)) {
		bus_dmamap_unload(rq->dma_tag, rq->mbuf[ix].dma_map);
		err = -ENOMEM;
		goto err_free_mbuf;
	}
#if (MLX5E_MAX_RX_SEGS == 1)
	wqe->data[0].addr = cpu_to_be64(segs[0].ds_addr);
#else
	wqe->data[0].addr = cpu_to_be64(segs[0].ds_addr);
	wqe->data[0].byte_count = cpu_to_be32(segs[0].ds_len |
	    MLX5_HW_START_PADDING);
	for (i = 1; i != nsegs; i++) {
		wqe->data[i].addr = cpu_to_be64(segs[i].ds_addr);
		wqe->data[i].byte_count = cpu_to_be32(segs[i].ds_len);
	}
	/* clear any remaining WQE scatter entries */
	for (; i < rq->nsegs; i++) {
		wqe->data[i].addr = 0;
		wqe->data[i].byte_count = 0;
	}
#endif
	rq->mbuf[ix].mbuf = mb;
	rq->mbuf[ix].data = mb->m_data;

	bus_dmamap_sync(rq->dma_tag, rq->mbuf[ix].dma_map,
	    BUS_DMASYNC_PREREAD);
	return (0);

err_free_mbuf:
	m_freem(mb);
	return (err);
}
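
/*
 * Refill the receive ring: post new receive WQEs until the work queue
 * is full, then tell the hardware by updating the doorbell record.
 * If mbuf allocation fails, the refill is retried later from the
 * watchdog callout.
 */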
static void
mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
	if (unlikely(rq->enabled == 0))
		return;

	while (!mlx5_wq_ll_is_full(&rq->wq)) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);

		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
			callout_reset_curcpu(&rq->watchdog, 1, (void *)&mlx5e_post_rx_wqes, rq);
			break;
		}
		mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe->next.next_wqe_index));
	}

	/* ensure wqes are visible to device before updating doorbell record */
	atomic_thread_fence_rel();

	mlx5_wq_ll_update_db_record(&rq->wq);
}
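
/*
 * Rebuild the headers of an mbuf carrying a hardware LRO aggregate:
 * refresh the TCP flags, ACK number, window and optional timestamp
 * from the CQE, and recompute the IPv4/IPv6 length fields.
 */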
static void
mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
{
	/* TODO: consider vlans, ip options, ... */
	struct ether_header *eh;
	uint16_t eh_type;
	uint16_t tot_len;
	struct ip6_hdr *ip6 = NULL;
	struct ip *ip4 = NULL;
	struct tcphdr *th;
	uint32_t *ts_ptr;
	uint8_t l4_hdr_type;
	int tcp_ack;

	eh = mtod(mb, struct ether_header *);
	eh_type = ntohs(eh->ether_type);

	l4_hdr_type = get_cqe_l4_hdr_type(cqe);
	tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
	    (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

	/* TODO: consider vlan */
	tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN;

	switch (eh_type) {
	case ETHERTYPE_IP:
		ip4 = (struct ip *)(eh + 1);
		th = (struct tcphdr *)(ip4 + 1);
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(eh + 1);
		th = (struct tcphdr *)(ip6 + 1);
		break;
	default:
		return;
	}

	ts_ptr = (uint32_t *)(th + 1);

	if (get_cqe_lro_tcppsh(cqe))
		th->th_flags |= TH_PUSH;

	if (tcp_ack) {
		th->th_flags |= TH_ACK;
		th->th_ack = cqe->lro_ack_seq_num;
		th->th_win = cqe->lro_tcp_win;

		/*
		 * FreeBSD handles only 32bit aligned timestamp right after
		 * the TCP hdr
		 * +--------+--------+--------+--------+
		 * |   NOP  |  NOP   |  TSopt |   10   |
		 * +--------+--------+--------+--------+
		 * |          TSval   timestamp        |
		 * +--------+--------+--------+--------+
		 * |          TSecr   timestamp        |
		 * +--------+--------+--------+--------+
		 */
		if (get_cqe_lro_timestamp_valid(cqe) &&
		    (__predict_true(*ts_ptr) == ntohl(TCPOPT_NOP << 24 |
		    TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 |
		    TCPOLEN_TIMESTAMP))) {
			/*
			 * cqe->timestamp is 64bit long.
			 * [0-31] - timestamp.
			 * [32-63] - timestamp echo reply.
			 */
			ts_ptr[1] = *(uint32_t *)&cqe->timestamp;
			ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);
		}
	}
	if (ip4) {
		ip4->ip_ttl = cqe->lro_min_ttl;
		ip4->ip_len = cpu_to_be16(tot_len);
		ip4->ip_sum = 0;
		ip4->ip_sum = in_cksum(mb, ip4->ip_hl << 2);
	} else {
		ip6->ip6_hlim = cqe->lro_min_ttl;
		ip6->ip6_plen = cpu_to_be16(tot_len -
		    sizeof(struct ip6_hdr));
	}
	/* TODO: handle tcp checksum */
}
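
/*
 * Convert a raw hardware timestamp into host clock units by linear
 * interpolation between the two most recent calibration points.
 * Ignoring the MLX5E_TSTMP_PREC scaling, the computation below is
 * roughly:
 *
 *   res = base_prev + (hw - hw_prev) * (base_curr - base_prev) /
 *         (hw_curr - hw_prev)
 *
 * The loop retries if the calibration generation changes while the
 * computation is in progress.
 */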
static uint64_t
mlx5e_mbuf_tstmp(struct mlx5e_priv *priv, uint64_t hw_tstmp)
{
	struct mlx5e_clbr_point *cp;
	uint64_t a1, a2, res;
	u_int gen;

	do {
		cp = &priv->clbr_points[priv->clbr_curr];
		gen = atomic_load_acq_int(&cp->clbr_gen);
		a1 = (hw_tstmp - cp->clbr_hw_prev) >> MLX5E_TSTMP_PREC;
		a2 = (cp->base_curr - cp->base_prev) >> MLX5E_TSTMP_PREC;
		res = (a1 * a2) << MLX5E_TSTMP_PREC;

		/*
		 * Divisor cannot be zero because calibration callback
		 * checks for the condition and disables timestamping
		 * if clock halted.
		 */
		res /= (cp->clbr_hw_curr - cp->clbr_hw_prev) >>
		    MLX5E_TSTMP_PREC;

		res += cp->base_prev;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != cp->clbr_gen);
	return (res);
}
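
/*
 * Turn a received completion into a fully initialized mbuf: trim the
 * chain to the actual packet length, set the flowid and RSS hash type,
 * hardware checksum flags, VLAN tag and, when the clock is calibrated,
 * the hardware receive timestamp.
 */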
static inline void
mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
    struct mlx5e_rq *rq, struct mbuf *mb,
    u32 cqe_bcnt)
{
	struct ifnet *ifp = rq->ifp;
	struct mlx5e_channel *c;
#if (MLX5E_MAX_RX_SEGS != 1)
	struct mbuf *mb_head;
#endif
	int lro_num_seg;	/* HW LRO session aggregated packets counter */
	uint64_t tstmp;

	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(mb, cqe);
		rq->stats.lro_packets++;
		rq->stats.lro_bytes += cqe_bcnt;
	}

#if (MLX5E_MAX_RX_SEGS == 1)
	mb->m_pkthdr.len = mb->m_len = cqe_bcnt;
#else
	mb->m_pkthdr.len = cqe_bcnt;
	for (mb_head = mb; mb != NULL; mb = mb->m_next) {
		if (mb->m_len > cqe_bcnt)
			mb->m_len = cqe_bcnt;
		cqe_bcnt -= mb->m_len;
		if (likely(cqe_bcnt == 0)) {
			if (likely(mb->m_next != NULL)) {
				/* trim off empty mbufs */
				m_freem(mb->m_next);
				mb->m_next = NULL;
			}
			break;
		}
	}
	/* rewind to first mbuf in chain */
	mb = mb_head;
#endif
	/* check if a Toeplitz hash was computed */
	if (cqe->rss_hash_type != 0) {
		mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
#ifdef RSS
		/* decode the RSS hash type */
		switch (cqe->rss_hash_type &
		    (CQE_RSS_DST_HTYPE_L4 | CQE_RSS_DST_HTYPE_IP)) {
		/* IPv4 */
		case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV4):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV4);
			break;
		case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV4):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV4);
			break;
		case CQE_RSS_DST_HTYPE_IPV4:
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV4);
			break;
		/* IPv6 */
		case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV6):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV6);
			break;
		case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV6):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV6);
			break;
		case CQE_RSS_DST_HTYPE_IPV6:
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV6);
			break;
		default:	/* other */
			M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH);
			break;
		}
#else
		M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH);
#endif
	} else {
		mb->m_pkthdr.flowid = rq->ix;
		M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
	}
	mb->m_pkthdr.rcvif = ifp;

	if (likely(ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
	    ((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) ==
	    (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK))) {
		mb->m_pkthdr.csum_flags =
		    CSUM_IP_CHECKED | CSUM_IP_VALID |
		    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mb->m_pkthdr.csum_data = htons(0xffff);
	} else {
		rq->stats.csum_none++;
	}

	if (cqe_has_vlan(cqe)) {
		mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info);
		mb->m_flags |= M_VLANTAG;
	}

	c = container_of(rq, struct mlx5e_channel, rq);
	if (c->priv->clbr_done >= 2) {
		tstmp = mlx5e_mbuf_tstmp(c->priv, be64_to_cpu(cqe->timestamp));
		if ((tstmp & MLX5_CQE_TSTMP_PTP) != 0) {
			/*
			 * Timestamp was taken on the packet entrance,
			 * instead of the cqe generation.
			 */
			tstmp &= ~MLX5_CQE_TSTMP_PTP;
			mb->m_flags |= M_TSTMP_HPREC;
		}
		mb->m_pkthdr.rcv_tstmp = tstmp;
		mb->m_flags |= M_TSTMP;
	}
}
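
/*
 * Helpers for CQE compression: read and write a single 64-byte CQE
 * slot in the completion queue ring.
 */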
static inline void
mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
{
	memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, (cc & cq->wq.sz_m1)),
	    sizeof(struct mlx5_cqe64));
}

static inline void
mlx5e_write_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
{
	memcpy(mlx5_cqwq_get_wqe(&cq->wq, cc & cq->wq.sz_m1),
	    data, sizeof(struct mlx5_cqe64));
}

static inline void
mlx5e_decompress_cqe(struct mlx5e_cq *cq, struct mlx5_cqe64 *title,
    struct mlx5_mini_cqe8 *mini,
    u16 wqe_counter, int i)
{
	/*
	 * NOTE: The fields which are not set here are copied from the
	 * initial and common title. See memcpy() in
	 * mlx5e_write_cqe_slot().
	 */
	title->byte_cnt = mini->byte_cnt;
	title->wqe_counter = cpu_to_be16((wqe_counter + i) & cq->wq.sz_m1);
	title->check_sum = mini->checksum;
	title->op_own = (title->op_own & 0xf0) |
	    (((cq->wq.cc + i) >> cq->wq.log_sz) & 1);
}

#define	MLX5E_MINI_ARRAY_SZ 8
/* Make sure structs are not packed differently */
CTASSERT(sizeof(struct mlx5_cqe64) ==
    sizeof(struct mlx5_mini_cqe8) * MLX5E_MINI_ARRAY_SZ);
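
/*
 * Expand a compressed CQE session in place: the title CQE is combined
 * with each 8-byte mini CQE and written back to the corresponding
 * completion queue slots, so the regular polling loop can process them
 * as ordinary CQEs.
 */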
static void
mlx5e_decompress_cqes(struct mlx5e_cq *cq)
{
	struct mlx5_mini_cqe8 mini_array[MLX5E_MINI_ARRAY_SZ];
	struct mlx5_cqe64 title;
	u32 cqe_count;
	u32 i = 0;
	u16 title_wqe_counter;

	mlx5e_read_cqe_slot(cq, cq->wq.cc, &title);
	title_wqe_counter = be16_to_cpu(title.wqe_counter);
	cqe_count = be32_to_cpu(title.byte_cnt);

	/* Make sure we won't overflow */
	KASSERT(cqe_count <= cq->wq.sz_m1,
	    ("%s: cqe_count %u > cq->wq.sz_m1 %u", __func__,
	    cqe_count, cq->wq.sz_m1));

	mlx5e_read_cqe_slot(cq, cq->wq.cc + 1, mini_array);
	while (true) {
		mlx5e_decompress_cqe(cq, &title,
		    &mini_array[i % MLX5E_MINI_ARRAY_SZ],
		    title_wqe_counter, i);
		mlx5e_write_cqe_slot(cq, cq->wq.cc + i, &title);
		i++;

		if (i == cqe_count)
			break;
		if (i % MLX5E_MINI_ARRAY_SZ == 0)
			mlx5e_read_cqe_slot(cq, cq->wq.cc + i, mini_array);
	}
}
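
/*
 * Poll up to "budget" receive completions: sync and unload (or copy
 * out) the corresponding mbufs, hand them to LRO or the network stack,
 * and release the consumed WQEs. Returns the number of completions
 * processed.
 */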
static int
mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
{
	int i;

	for (i = 0; i < budget; i++) {
		struct mlx5e_rx_wqe *wqe;
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		__be16 wqe_counter_be;
		u16 wqe_counter;
		u32 byte_cnt;

		cqe = mlx5e_get_cqe(&rq->cq);
		if (!cqe)
			break;

		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)
			mlx5e_decompress_cqes(&rq->cq);

		mlx5_cqwq_pop(&rq->cq.wq);

		wqe_counter_be = cqe->wqe_counter;
		wqe_counter = be16_to_cpu(wqe_counter_be);
		wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
		byte_cnt = be32_to_cpu(cqe->byte_cnt);

		bus_dmamap_sync(rq->dma_tag,
		    rq->mbuf[wqe_counter].dma_map,
		    BUS_DMASYNC_POSTREAD);

		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
			rq->stats.wqe_err++;
			goto wq_ll_pop;
		}
		if ((MHLEN - MLX5E_NET_IP_ALIGN) >= byte_cnt &&
		    (mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) {
#if (MLX5E_MAX_RX_SEGS != 1)
			/* set maximum mbuf length */
			mb->m_len = MHLEN - MLX5E_NET_IP_ALIGN;
#endif
			/* get IP header aligned */
			mb->m_data += MLX5E_NET_IP_ALIGN;

			bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t),
			    byte_cnt);
		} else {
			mb = rq->mbuf[wqe_counter].mbuf;
			rq->mbuf[wqe_counter].mbuf = NULL;	/* safety clear */

			bus_dmamap_unload(rq->dma_tag,
			    rq->mbuf[wqe_counter].dma_map);
		}

		mlx5e_build_rx_mbuf(cqe, rq, mb, byte_cnt);
		rq->stats.packets++;

#if !defined(HAVE_TCP_LRO_RX)
		tcp_lro_queue_mbuf(&rq->lro, mb);
#else
		if (mb->m_pkthdr.csum_flags == 0 ||
		    (rq->ifp->if_capenable & IFCAP_LRO) == 0 ||
		    rq->lro.lro_cnt == 0 ||
		    tcp_lro_rx(&rq->lro, mb, 0) != 0) {
			rq->ifp->if_input(rq->ifp, mb);
		}
#endif
wq_ll_pop:
		mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
		    &wqe->next.next_wqe_index);
	}

	mlx5_cqwq_update_db_record(&rq->cq.wq);

	/* ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();
	return (i);
}
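
/*
 * RX completion event handler: alternates between polling the CQ and
 * re-posting receive WQEs, so the ring is not drained under heavy
 * load, then re-arms the CQ.
 */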
void
mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq);
	int i = 0;

#ifdef HAVE_PER_CQ_EVENT_PACKET
#if (MHLEN < 15)
#error "MHLEN is too small"
#endif
	struct mbuf *mb = m_gethdr(M_NOWAIT, MT_DATA);

	if (mb != NULL) {
		/* this code is used for debugging purpose only */
		mb->m_pkthdr.len = mb->m_len = 15;
		memset(mb->m_data, 255, 14);
		mb->m_data[14] = rq->ix;
		mb->m_pkthdr.rcvif = rq->ifp;
		rq->ifp->if_input(rq->ifp, mb);
	}
#endif

	mtx_lock(&rq->mtx);

	/*
	 * Polling the entire CQ without posting new WQEs results in
	 * lack of receive WQEs during heavy traffic scenarios.
	 */
	for (;;) {
		if (mlx5e_poll_rx_cq(rq, MLX5E_RX_BUDGET_MAX) !=
		    MLX5E_RX_BUDGET_MAX)
			break;
		i += MLX5E_RX_BUDGET_MAX;
		if (i >= MLX5E_BUDGET_MAX)
			break;
		mlx5e_post_rx_wqes(rq);
	}
	mlx5e_post_rx_wqes(rq);
	mlx5e_cq_arm(&rq->cq, MLX5_GET_DOORBELL_LOCK(&rq->channel->priv->doorbell_lock));
	tcp_lro_flush_all(&rq->lro);
	mtx_unlock(&rq->mtx);
}