/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "en.h"
#include <machine/atomic.h>
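/*
 * Transmit path: send queue (SQ) selection, WQE construction with
 * checksum/TSO/VLAN offloads, busdma mapping of mbuf chains, doorbell
 * batching, and transmit completion queue (CQ) processing.
 */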
static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (true);
	}
	return (false);
}
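/*
 * Completion event moderation, used by mlx5e_send_nop() and
 * mlx5e_sq_xmit() below: with sq->cev_factor == N, only every Nth
 * posted WQE requests a completion (MLX5_WQE_CTRL_CQ_UPDATE), so a
 * single CQE acknowledges N WQEs. For example, with a factor of 4 the
 * first three WQEs are posted with fm_ce_se == 0 and the fourth one
 * requests the CQE. mlx5e_poll_tx_cq() relies on this when it frees
 * sq->cev_factor ring entries per CQE.
 */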
void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}
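/*
 * A NOP WQE consumes DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS)
 * ring building blocks but carries no data; its ring entry is marked
 * with num_bytes == 0 so that mlx5e_poll_tx_cq() can account it as a
 * NOP. mlx5e_sq_xmit() below uses a single over-sized NOP to pad the
 * queue up to the ring edge so that a real WQE never wraps around.
 */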
#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif
static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_channel * volatile *ppch;
	struct mlx5e_channel *pch;
	u32 ch;
	u32 tc;

	ppch = priv->channel;

	/* check if channels are successfully opened */
	if (unlikely(ppch == NULL))
		return (NULL);

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

#ifdef RATELIMIT
	if (mb->m_pkthdr.snd_tag != NULL) {
		struct mlx5e_sq *sq;

		/* check for route change */
		if (mb->m_pkthdr.snd_tag->ifp != ifp)
			return (NULL);

		/* get pointer to sendqueue */
		sq = container_of(mb->m_pkthdr.snd_tag,
		    struct mlx5e_rl_channel, m_snd_tag)->sq;

		/* check if valid and not stopped */
		if (sq != NULL && sq->stopped == 0)
			return (sq);
	}
#endif
	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash() is not present in stable, so just
		 * throw unhashed mbufs on queue 0:
		 */
		ch = 0;
#endif
	}

	/* check if channel is allocated and not stopped */
	pch = ppch[ch];
	if (likely(pch != NULL && pch->sq[tc].stopped == 0))
		return (&pch->sq[tc]);
	return (NULL);
}
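/*
 * Selection order, as implemented above: a rate-limit send tag (when
 * RATELIMIT is compiled in) pins the mbuf to its dedicated SQ; else an
 * RSS bucket or flowid, when present, picks the channel; otherwise a
 * software L3/L4 hash is used (queue 0 on pre-1100000 kernels). The
 * traffic class is taken from the VLAN priority bits when available.
 */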
static inline u16
mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, struct mbuf *mb)
{
	switch (sq->min_inline_mode) {
	case MLX5_INLINE_MODE_NONE:
		/*
		 * When inline mode is NONE, we do not need to copy
		 * headers into WQEs, except when vlan tag framing is
		 * requested. Hardware might offload vlan tagging on
		 * transmit. This is a separate capability, which is
		 * known to be disabled on ConnectX-5 due to a hardware
		 * bug RM 931383. If vlan_inline_cap is not present and
		 * the packet has a vlan tag, fall back to inlining.
		 */
		if ((mb->m_flags & M_VLANTAG) != 0 &&
		    sq->vlan_inline_cap == 0)
			break;
		return (0);
	case MLX5_INLINE_MODE_L2:
		/*
		 * Due to hardware limitations, when trust mode is
		 * DSCP, the hardware may request MLX5_INLINE_MODE_L2
		 * while it really needs all L2 headers and the first 4
		 * bytes of the IP header (which include the
		 * TOS/traffic-class).
		 *
		 * To avoid doing a firmware command for querying the
		 * trust state and parsing the mbuf for doing
		 * unnecessary checks (VLAN/eth_type) in the fast path,
		 * we are going for the worst case (ETHER_HDR_LEN +
		 * ETHER_VLAN_ENCAP_LEN + 4 = 22 bytes) if
		 * mb->m_pkthdr.len allows it.
		 */
		if (mb->m_pkthdr.len > ETHER_HDR_LEN +
		    ETHER_VLAN_ENCAP_LEN + 4)
			return (MIN(sq->max_inline, ETHER_HDR_LEN +
			    ETHER_VLAN_ENCAP_LEN + 4));
		break;
	default:
		break;
	}
	return (MIN(sq->max_inline, mb->m_pkthdr.len));
}
static int
mlx5e_get_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	struct ip6_hdr *ip6;
	int ip_hlen, tcp_hlen;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	if (mb->m_len < eth_hdr_len)
		return (0);
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	if (mb->m_len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}
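/*
 * mlx5e_get_header_size() returns the combined Ethernet + IP + TCP
 * header length, or 0 when the packet is not contiguous TCP/IP within
 * the first mbuf. The TSO path below inlines exactly this many bytes
 * into the WQE so the hardware can replicate the headers into each
 * emitted segment.
 */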
/*
 * The return value is not going back to the stack because of
 * the drbr
 */
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;
	/*
	 * Return ENOBUFS if the queue is full. This may trigger reinsertion
	 * of the mbuf into the drbr (see mlx5e_xmit_locked).
	 */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
		return (ENOBUFS);
	}

	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1))
			return (ENOMEM);
	}
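	/*
	 * Ring-edge arithmetic, for reference: with a ring of S WQEBBs
	 * (sq->wq.sz_m1 == S - 1), (~sq->pc) & sq->wq.sz_m1 equals
	 * S - 1 - (sq->pc & sq->wq.sz_m1), i.e. the number of WQEBBs
	 * left before the wrap, minus one. When fewer than
	 * MLX5_SEND_WQE_MAX_WQEBBS slots remain, the single NOP above
	 * covers all (pi + 1) remaining WQEBBs, so the next real WQE
	 * starts at the ring base and can never wrap.
	 */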
	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);
	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_header_size(mb);
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;
		ihs = mlx5e_get_inline_hdr_size(sq, mb);
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}
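	/*
	 * For TSO, num_bytes above accounts for the bytes that will
	 * actually appear on the wire: the payload plus one copy of
	 * the headers (ihs bytes) for each of the num_pkts segments
	 * the hardware will emit.
	 */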
	if (ihs == 0) {
		if ((mb->m_flags & M_VLANTAG) != 0) {
			wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
			wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
		} else {
			wqe->eth.inline_hdr_sz = 0;
		}
	} else {
		if ((mb->m_flags & M_VLANTAG) != 0) {
			struct ether_vlan_header *eh = (struct ether_vlan_header
			    *)wqe->eth.inline_hdr_start;

			/* Range checks */
			if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
				ihs = (MLX5E_MAX_TX_INLINE -
				    ETHER_VLAN_ENCAP_LEN);
			else if (ihs < ETHER_HDR_LEN) {
				err = EINVAL;
				goto tx_drop;
			}
			m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
			m_adj(mb, ETHER_HDR_LEN);
			/* Insert 4 bytes VLAN tag into data stream */
			eh->evl_proto = eh->evl_encap_proto;
			eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
			eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
			/* Copy rest of header data, if any */
			m_copydata(mb, 0, ihs - ETHER_HDR_LEN,
			    (caddr_t)(eh + 1));
			m_adj(mb, ihs - ETHER_HDR_LEN);
			/* Extend header by 4 bytes */
			ihs += ETHER_VLAN_ENCAP_LEN;
		} else {
			m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
			m_adj(mb, ihs);
		}
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	}
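	/*
	 * At this point any inlined headers have been copied into the
	 * WQE and trimmed from the mbuf with m_adj(), so the DMA
	 * segments mapped below cover only the remaining payload. For
	 * VLAN-tagged packets the 4-byte tag was synthesized directly
	 * in the inline copy instead of being inserted into the mbuf.
	 */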
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;

	/* Make sure all mbuf data, if any, is written to RAM */
	if (nsegs != 0) {
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		m_freem(mb);
		mb = NULL;
	}
	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}

	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	sq->stats.packets++;
	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	*mbp = NULL;
	m_freem(mb);
	return (err);
}
static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur.
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 ci;
		u16 x;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (cqe == NULL)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;	/* Safety clear */

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();

	sq->cc = sqcc;

	if (sq->sq_tq != NULL &&
	    atomic_cmpset_int(&sq->queue_state, MLX5E_SQ_FULL, MLX5E_SQ_READY))
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
}
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct mbuf *next;
	int err = 0;

	if (likely(mb != NULL)) {
		/*
		 * If we can't insert the mbuf into the drbr, try to
		 * transmit anyway. Keep the error so it can be
		 * returned after the xmit.
		 */
		err = drbr_enqueue(ifp, sq->br, mb);
	}

	/*
	 * Check if the network interface is closed or if the SQ is
	 * being stopped:
	 */
	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sq->stopped != 0))
		return (err);

	/* Process the queue */
	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
		if (mlx5e_sq_xmit(sq, &next) != 0) {
			if (next == NULL) {
				drbr_advance(ifp, sq->br);
			} else {
				drbr_putback(ifp, sq->br, next);
				atomic_store_rel_int(&sq->queue_state,
				    MLX5E_SQ_FULL);
			}
			break;
		}
		drbr_advance(ifp, sq->br);
	}
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}
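/*
 * mlx5e_sq_xmit() only records the doorbell data in sq->doorbell; the
 * callers ring the doorbell via mlx5e_tx_notify_hw() once per batch
 * of posted WQEs rather than once per packet.
 */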
static int
mlx5e_xmit_locked_no_br(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sq->stopped != 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	/* Do transmit */
	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}
int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	sq = mlx5e_select_queue(ifp, mb);
	if (unlikely(sq == NULL)) {
#ifdef RATELIMIT
		/* Check for route change */
		if (mb->m_pkthdr.snd_tag != NULL &&
		    mb->m_pkthdr.snd_tag->ifp != ifp) {
			/* Free mbuf */
			m_freem(mb);
			/*
			 * Tell upper layers about route change and to
			 * re-transmit this packet:
			 */
			return (EAGAIN);
		}
#endif
		/* Invalid send queue */
		m_freem(mb);
		return (ENXIO);
	}

	if (unlikely(sq->br == NULL)) {
		/* rate limited traffic */
		mtx_lock(&sq->lock);
		ret = mlx5e_xmit_locked_no_br(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else if (mtx_trylock(&sq->lock)) {
		ret = mlx5e_xmit_locked(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else {
		ret = drbr_enqueue(ifp, sq->br, mb);
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
	}
	return (ret);
}
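/*
 * Locking strategy, as implemented above: rate-limited SQs have no buf
 * ring and take the SQ lock unconditionally. For regular SQs the lock
 * is only tried; on contention the mbuf is queued to the drbr and the
 * taskqueue (mlx5e_tx_que() below) drains it later, so mlx5e_xmit()
 * never spins on the lock.
 */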
void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}
void
mlx5e_tx_que(void *context, int pending)
{
	struct mlx5e_sq *sq = context;
	struct ifnet *ifp = sq->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sq->lock);
		if (!drbr_empty(ifp, sq->br))
			mlx5e_xmit_locked(ifp, sq, NULL);
		mtx_unlock(&sq->lock);
	}
}