/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "en.h"
#include <machine/atomic.h>
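/*
 * Completion event moderation helper: request a completion (CQE) only
 * for every "cev_factor"-th work request posted to the send queue, so
 * that a single CQE accounts for a whole batch of WQEs.
 */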
static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (1);
	}
	return (0);
}
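/*
 * Post a NOP WQE spanning "ds_cnt" data segments.  mlx5e_sq_xmit() uses
 * this to pad the send queue up to its edge so that a following, larger
 * WQE never wraps around the ring.
 */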
void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}
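/*
 * Seed for the software L3/L4 flow hash used by mlx5e_select_queue()
 * when the mbuf does not already carry a flow ID.
 */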
#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random subsystem has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif
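/*
 * Resolve the destination send queue from a send tag attached to the
 * mbuf (rate limit or unlimited connection tags).  Returns NULL when
 * the route has changed or the selected send queue is not running.
 */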
static struct mlx5e_sq *
mlx5e_select_queue_by_send_tag(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_snd_tag *ptag;
	struct mlx5e_sq *sq;

	/* check for route change */
	if (mb->m_pkthdr.snd_tag->ifp != ifp)
		return (NULL);

	/* get pointer to sendqueue */
	ptag = container_of(mb->m_pkthdr.snd_tag,
	    struct mlx5e_snd_tag, m_snd_tag);

	switch (ptag->type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		sq = container_of(ptag,
		    struct mlx5e_rl_channel, tag)->sq;
		break;
#endif
	case IF_SND_TAG_TYPE_UNLIMITED:
		sq = &container_of(ptag,
		    struct mlx5e_channel, tag)->sq[0];
		KASSERT(({
			struct mlx5e_priv *priv = ifp->if_softc;
			priv->channel_refs > 0; }),
		    ("mlx5e_select_queue: Channel refs are zero for unlimited tag"));
		break;
	default:
		sq = NULL;
		break;
	}

	/* check if send queue is running */
	if (sq != NULL && READ_ONCE(sq->running) != 0)
		return (sq);
	return (NULL);
}
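/*
 * Default send queue selection: the traffic class comes from the VLAN
 * priority bits, if any, and the channel is derived from the mbuf flow
 * ID or from a software L3/L4 header hash.
 */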
static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_sq *sq;
	u32 ch;
	u32 tc;

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash not present in stable, so just
		 * throw unhashed mbufs on queue 0.
		 */
		ch = 0;
#endif
	}

	/* check if send queue is running */
	sq = &priv->channel[ch].sq[tc];
	if (likely(READ_ONCE(sq->running) != 0))
		return (sq);
	return (NULL);
}
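/*
 * Compute how many bytes of the L2 header (plus the start of the IP
 * header, so the TOS/traffic class byte is covered) must be inlined
 * into the WQE.  Falls back to inlining up to sq->max_inline bytes
 * when the headers cannot be parsed.
 */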
static inline u32
mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	uint16_t eth_type;
	int min_inline;

	eh = mtod(mb, struct ether_vlan_header *);
	if (unlikely(mb->m_len < ETHER_HDR_LEN)) {
		goto max_inline;
	} else if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
			goto max_inline;
		eth_type = ntohs(eh->evl_proto);
		min_inline = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		min_inline = ETHER_HDR_LEN;
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		/*
		 * Make sure the TOS (IPv4) or traffic class (IPv6)
		 * field gets inlined. Else the SQ may stall.
		 */
		min_inline += 4;
		break;
	default:
		goto max_inline;
	}

	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < min_inline)
		goto max_inline;
	return (min_inline);

max_inline:
	return (MIN(mb->m_pkthdr.len, sq->max_inline));
}
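/*
 * Compute the number of header bytes (Ethernet + IP or IPv6 + TCP, or
 * the first 8 bytes of a UDP header) that must be inlined for TSO and
 * for the IP/TCP_UDP inline modes.  Returns 0 when the headers cannot
 * be parsed from the first mbuf.
 */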
static inline u32
mlx5e_get_full_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
			return (0);
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		switch (ip->ip_p) {
		case IPPROTO_TCP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen;
			break;
		case IPPROTO_UDP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen + 8;
			goto done;
		default:
			return (0);
		}
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		switch (ip6->ip6_nxt) {
		case IPPROTO_TCP:
			eth_hdr_len += sizeof(*ip6);
			break;
		case IPPROTO_UDP:
			eth_hdr_len += sizeof(*ip6) + 8;
			goto done;
		default:
			return (0);
		}
		break;
	default:
		return (0);
	}

	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
done:
	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}
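/*
 * Build and post a single send WQE for the given mbuf.  On success the
 * send queue owns the mbuf and *mbp is cleared; on a parse or mapping
 * error the mbuf is freed; if the queue is full the mbuf is returned
 * to the caller untouched.
 */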
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/* Return ENOBUFS if the queue is full */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS)))
		return (ENOBUFS);

	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1))
			return (ENOMEM);
	}

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;
	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
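	/*
	 * For TSO the LSO opcode is used and the full L2+L3+L4 header must
	 * be inlined.  Otherwise a plain SEND is posted and the amount of
	 * inlined header data depends on the configured inline mode and on
	 * whether the hardware can insert the VLAN tag by itself.
	 */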
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_full_header_size(mb);
		if (unlikely(ihs == 0)) {
			err = EINVAL;
			goto tx_drop;
		}
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;

		switch (sq->min_inline_mode) {
		case MLX5_INLINE_MODE_IP:
		case MLX5_INLINE_MODE_TCP_UDP:
			ihs = mlx5e_get_full_header_size(mb);
			if (unlikely(ihs == 0))
				ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_L2:
			ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_NONE:
			/* FALLTHROUGH */
		default:
			if ((mb->m_flags & M_VLANTAG) != 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
				/* inlining VLAN data is not required */
				wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
				wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
				ihs = 0;
			} else if ((mb->m_flags & M_VLANTAG) == 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
				/* inlining non-VLAN data is not required */
				ihs = 0;
			} else {
				/* we are forced to inline the L2 header, if any */
				ihs = mlx5e_get_l2_header_size(sq, mb);
			}
			break;
		}
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}
	if (likely(ihs == 0)) {
		/* nothing to inline */
	} else if (unlikely(ihs > sq->max_inline)) {
		/* inline header size is too big */
		err = EINVAL;
		goto tx_drop;
	} else if ((mb->m_flags & M_VLANTAG) != 0) {
		struct ether_vlan_header *eh = (struct ether_vlan_header *)
		    wqe->eth.inline_hdr_start;

		/* Range checks */
		if (unlikely(ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN)))
			ihs = (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN);
		else if (unlikely(ihs < ETHER_HDR_LEN)) {
			err = EINVAL;
			goto tx_drop;
		}
		m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
		m_adj(mb, ETHER_HDR_LEN);
		/* Insert 4-byte VLAN tag into data stream */
		eh->evl_proto = eh->evl_encap_proto;
		eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
		/* Copy rest of header data, if any */
		m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
		m_adj(mb, ihs - ETHER_HDR_LEN);
		/* Extend header by 4 bytes */
		ihs += ETHER_VLAN_ENCAP_LEN;
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	} else {
		m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
		m_adj(mb, ihs);
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	}
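	/*
	 * Count the 16-byte data segments consumed so far (control, eth and
	 * inlined headers) so the DMA data segments can be appended right
	 * after them.
	 */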
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;

	/* Make sure all mbuf data, if any, is written to RAM */
	if (nsegs != 0) {
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		m_freem(mb);
		mb = NULL;
	}

	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}
	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	/* Count all traffic going out */
	sq->stats.packets++;
	sq->stats.bytes += sq->mbuf[pi].num_bytes;

	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	m_freem(mb);
	*mbp = NULL;	/* safety clear */
	return (err);
}
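/*
 * Reclaim completed send WQEs.  Each CQE accounts for "cev_factor"
 * WQEs, because completions are only requested for every cev_factor-th
 * work request (see mlx5e_do_send_cqe() above).
 */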
static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 x;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (cqe == NULL)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();

	sq->cc = sqcc;
}
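/*
 * Transmit one mbuf with the send queue lock held: post the WQE, ring
 * the doorbell if anything was queued and start the event timer which
 * flushes the transmit ring on timeout.
 */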
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    READ_ONCE(sq->running) == 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	/* Do transmit */
	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}

	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}
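/*
 * Interface transmit routine, called by the network stack.  Selects a
 * send queue (by send tag when one is attached to the mbuf) and hands
 * the mbuf to mlx5e_xmit_locked() under the queue lock.
 */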
int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	if ((mb->m_pkthdr.csum_flags & CSUM_SND_TAG) != 0 &&
	    (mb->m_pkthdr.snd_tag != NULL)) {
		sq = mlx5e_select_queue_by_send_tag(ifp, mb);
		if (unlikely(sq == NULL)) {
			/* Check for route change */
			if (mb->m_pkthdr.snd_tag->ifp != ifp) {
				/* Free mbuf */
				m_freem(mb);

				/*
				 * Tell upper layers about route
				 * change and to re-transmit this
				 * packet:
				 */
				return (EAGAIN);
			}
			goto select_queue;
		}
	} else {
select_queue:
		sq = mlx5e_select_queue(ifp, mb);
		if (unlikely(sq == NULL)) {
			/* Free mbuf */
			m_freem(mb);

			/* Invalid send queue */
			return (ENXIO);
		}
	}

	mtx_lock(&sq->lock);
	ret = mlx5e_xmit_locked(ifp, sq, mb);
	mtx_unlock(&sq->lock);

	return (ret);
}
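/*
 * Completion queue interrupt handler: poll the TX CQ and re-arm it.
 */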
void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}