/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "en.h"
#include <machine/atomic.h>
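/*
 * Completion event moderation: mlx5e_do_send_cqe() is called once per
 * posted WQE and requests a completion queue entry (CQE) only every
 * "cev_factor" calls. This reduces completion processing overhead at
 * the cost of delaying mbuf reclamation for the intermediate WQEs.
 */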
static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (1);
	}
	return (0);
}
void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}
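/*
 * Sizing note (assuming the standard mlx5 WQE layout of 64-byte basic
 * blocks and 16-byte data segments, i.e. MLX5_SEND_WQEBB_NUM_DS == 4):
 * a NOP with ds_cnt == 4 occupies DIV_ROUND_UP(4, 4) == 1 WQEBB, so
 * the producer counter "pc" advances by exactly one slot.
 */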
#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif
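/*
 * Seeding the hash from the random subsystem makes the mapping of
 * unhashed flows to channels stable within one boot but unpredictable
 * across boots, so no fixed traffic pattern can systematically land
 * on a single queue.
 */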
static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_channel * volatile *ppch;
	struct mlx5e_channel *pch;
	u32 ch;
	u32 tc;

	ppch = priv->channel;

	/* check if channels are successfully opened */
	if (unlikely(ppch == NULL))
		return (NULL);

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash is not present in stable, so just
		 * throw unhashed mbufs on queue 0
		 */
		ch = 0;
#endif
	}

	/* check if channel is allocated and not stopped */
	pch = ppch[ch];
	if (likely(pch != NULL && pch->sq[tc].stopped == 0))
		return (&pch->sq[tc]);
	return (NULL);
}
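/*
 * Selection summary: the traffic class comes from the VLAN PCP bits
 * (ether_vtag >> 13) and the channel from the mbuf flowid, or from a
 * software L3/L4 hash when no flowid is present. The "% 128" step
 * folds large flowids into a small range before the modulo over the
 * channel count.
 */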
static inline u16
mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, struct mbuf *mb)
{

	switch (sq->min_inline_mode) {
	case MLX5_INLINE_MODE_NONE:
		/*
		 * When inline mode is NONE, we do not need to copy
		 * headers into WQEs, except when vlan tag framing is
		 * requested. Hardware might offload vlan tagging on
		 * transmit. This is a separate capability, which is
		 * known to be disabled on ConnectX-5 due to a hardware
		 * bug RM 931383. If vlan_inline_cap is not present and
		 * the packet has a vlan tag, fall back to inlining.
		 */
		if ((mb->m_flags & M_VLANTAG) != 0 &&
		    sq->vlan_inline_cap == 0)
			break;
		return (0);
	case MLX5_INLINE_MODE_L2:
		/*
		 * Due to hardware limitations, when trust mode is
		 * DSCP, the hardware may request MLX5_INLINE_MODE_L2
		 * while it really needs all L2 headers and the 4 first
		 * bytes of the IP header (which include the
		 * TOS/traffic-class).
		 *
		 * To avoid doing a firmware command for querying the
		 * trust state and parsing the mbuf for doing
		 * unnecessary checks (VLAN/eth_type) in the fast path,
		 * we are going for the worst case (22 bytes) if
		 * the mb->m_pkthdr.len allows it.
		 */
		if (mb->m_pkthdr.len > ETHER_HDR_LEN +
		    ETHER_VLAN_ENCAP_LEN + 4)
			return (MIN(sq->max_inline, ETHER_HDR_LEN +
			    ETHER_VLAN_ENCAP_LEN + 4));
		break;
	}
	return (MIN(sq->max_inline, mb->m_pkthdr.len));
}
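/*
 * The 22 byte worst case above is ETHER_HDR_LEN (14) +
 * ETHER_VLAN_ENCAP_LEN (4) + the first 4 bytes of the IP header,
 * which carry the TOS/traffic-class field needed by DSCP trust mode.
 */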
/*
 * This function parses IPv4 and IPv6 packets looking for TCP
 * headers.
 *
 * The return value indicates the number of bytes from the beginning
 * of the packet until the first byte after the TCP header. If
 * this function returns zero, the parsing failed.
 */
static inline u16
mlx5e_get_header_size(const struct mbuf *mb)
{
	const struct ether_vlan_header *eh;
	const struct tcphdr *th;
	const struct ip *ip;
	int ip_hlen, tcp_hlen;
	const struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, const struct ether_vlan_header *);
	if (unlikely(mb->m_len < ETHER_HDR_LEN))
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
			return (0);
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (const struct ip *)(mb->m_data + eth_hdr_len);
		if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip)))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (const struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip6)))
			return (0);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (unlikely(mb->m_len < eth_hdr_len + sizeof(*th))) {
		const struct mbuf *m_th = mb->m_next;
		if (unlikely(mb->m_len != eth_hdr_len ||
		    m_th == NULL || m_th->m_len < sizeof(*th)))
			return (0);
		th = (const struct tcphdr *)(m_th->m_data);
	} else {
		th = (const struct tcphdr *)(mb->m_data + eth_hdr_len);
	}
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (unlikely(mb->m_pkthdr.len < eth_hdr_len))
		return (0);
	return (eth_hdr_len);
}
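/*
 * The header length computed here is used twice by the TSO path in
 * mlx5e_sq_xmit(): the headers are inlined into the WQE, and the byte
 * accounting charges one copy of them per generated segment, since
 * the hardware replicates the headers in front of every MSS-sized
 * chunk of payload.
 */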
/*
 * The return value is not going back to the stack because of
 * the drbr (the mbuf may be re-queued; see mlx5e_xmit_locked)
 */
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/*
	 * Return ENOBUFS if the queue is full; this may trigger reinsertion
	 * of the mbuf into the drbr (see mlx5e_xmit_locked)
	 */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
		return (ENOBUFS);
	}
	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
			return (ENOMEM);
		}
	}
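	/*
	 * "(~sq->pc) & sz_m1" equals the number of contiguous WQEBB
	 * slots left before the ring wraps, minus one. The multi-slot
	 * NOP above is sized to (pi + 1) slots, padding the ring tail
	 * so the next WQE starts at the ring base without wrapping.
	 */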
	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);
	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_header_size(mb);
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;
		ihs = mlx5e_get_inline_hdr_size(sq, mb);
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}
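	/*
	 * Byte-accounting example for the TSO branch above: a 7354 byte
	 * packet with ihs = 54 and mss = 1460 gives payload_len = 7300,
	 * num_pkts = DIV_ROUND_UP(7300, 1460) = 5 and num_bytes =
	 * 7300 + 5 * 54 = 7570 bytes sent on the wire.
	 */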
	if (ihs == 0) {
		if ((mb->m_flags & M_VLANTAG) != 0) {
			wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
			wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
		} else {
			wqe->eth.inline_hdr_sz = 0;
		}
	} else {
		if ((mb->m_flags & M_VLANTAG) != 0) {
			struct ether_vlan_header *eh = (struct ether_vlan_header
			    *)wqe->eth.inline_hdr_start;

			/* Range checks */
			if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
				ihs = (MLX5E_MAX_TX_INLINE -
				    ETHER_VLAN_ENCAP_LEN);
			else if (ihs < ETHER_HDR_LEN) {
				err = EINVAL;
				goto tx_drop;
			}
			m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
			m_adj(mb, ETHER_HDR_LEN);
			/* Insert 4 bytes VLAN tag into data stream */
			eh->evl_proto = eh->evl_encap_proto;
			eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
			eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
			/* Copy rest of header data, if any */
			m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh +
			    1));
			m_adj(mb, ihs - ETHER_HDR_LEN);
			/* Extend header by 4 bytes */
			ihs += ETHER_VLAN_ENCAP_LEN;
		} else {
			m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
			m_adj(mb, ihs);
		}
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	}
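	/*
	 * The block above performs software VLAN insertion: the bare
	 * Ethernet header is copied into the WQE inline area, the
	 * original EtherType moves into evl_proto, the 802.1Q tag is
	 * spliced in, and the inlined header grows by the 4 byte
	 * ETHER_VLAN_ENCAP_LEN.
	 */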
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	/* Trim off empty mbufs */
	while (mb->m_len == 0) {
		mb = m_free(mb);
		/* Check if all data has been inlined */
		if (mb == NULL)
			goto skip_dma;
	}
	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/*
		 * Update *mbp before defrag in case it was trimmed in the
		 * loop above
		 */
		*mbp = mb;
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;
	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}
skip_dma:
	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));
	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	/* Make sure all mbuf data is written to RAM */
	if (mb != NULL)
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map, BUS_DMASYNC_PREWRITE);

	sq->stats.packets++;
	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	m_freem(mb);
	*mbp = NULL;	/* safety clear */
	return (err);
}
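/*
 * Note that mlx5e_sq_xmit() only stages the doorbell contents in
 * sq->doorbell; the actual MMIO write happens in the callers via
 * mlx5e_tx_notify_hw(), so a burst of WQEs posted under one lock hold
 * rings the hardware doorbell once.
 */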
static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 x;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (cqe == NULL)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;	/* Safety clear */

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc = sqcc;

	if (sq->sq_tq != NULL &&
	    atomic_cmpset_int(&sq->queue_state, MLX5E_SQ_FULL, MLX5E_SQ_READY))
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
}
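/*
 * Each CQE retires "cev_factor" WQEs in one shot, mirroring the
 * send-side moderation in mlx5e_do_send_cqe(). The consumer counter
 * is advanced locally in "sqcc" and only published to sq->cc after
 * mlx5_cqwq_update_db_record(), per the ordering comment above.
 */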
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct mbuf *next;
	int err = 0;

	if (likely(mb != NULL)) {
		/*
		 * If we can't insert mbuf into drbr, try to xmit anyway.
		 * We keep the error we got so we could return that after xmit.
		 */
		err = drbr_enqueue(ifp, sq->br, mb);
	}

	/*
	 * Check if the network interface is closed or if the SQ is
	 * being stopped:
	 */
	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sq->stopped != 0))
		return (err);

	/* Process the queue */
	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
		if (mlx5e_sq_xmit(sq, &next) != 0) {
			if (next != NULL) {
				drbr_putback(ifp, sq->br, next);
				atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_FULL);
			}
			break;
		}
		drbr_advance(ifp, sq->br);
	}
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}
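/*
 * The HOLD_NOPS transition defers NOP-based completion flushing while
 * transmits keep arriving; the timer armed in the INITIAL state later
 * forces out completions for WQEs whose CQE requests were suppressed
 * by the cev_factor moderation.
 */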
static int
mlx5e_xmit_locked_no_br(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sq->stopped != 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}

	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}
int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	sq = mlx5e_select_queue(ifp, mb);
	if (unlikely(sq == NULL)) {
		/* Invalid send queue */
		m_freem(mb);
		return (ENXIO);
	}

	if (unlikely(sq->br == NULL)) {
		/* rate limited traffic */
		mtx_lock(&sq->lock);
		ret = mlx5e_xmit_locked_no_br(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else if (mtx_trylock(&sq->lock)) {
		ret = mlx5e_xmit_locked(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else {
		ret = drbr_enqueue(ifp, sq->br, mb);
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
	}

	return (ret);
}
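/*
 * Locking strategy: mtx_trylock() keeps the common transmit path from
 * blocking. When the SQ lock is contended, the mbuf is parked in the
 * drbr and the per-SQ taskqueue (mlx5e_tx_que) drains it later, so
 * only the rate-limited path ever waits on the lock.
 */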
void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}
void
mlx5e_tx_que(void *context, int pending)
{
	struct mlx5e_sq *sq = context;
	struct ifnet *ifp = sq->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sq->lock);
		if (!drbr_empty(ifp, sq->br))
			mlx5e_xmit_locked(ifp, sq, NULL);
		mtx_unlock(&sq->lock);
	}
}