/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "en.h"
#include <machine/atomic.h>

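/*
 * Post a single NOP work request spanning "ds_cnt" data segments
 * (16 bytes each). The completion flag is always set, so the consumer
 * counter keeps advancing over the padding; mlx5e_sq_xmit() relies on
 * this below to fill the send queue up to the ring edge so that no
 * real WQE ever wraps around the end of the work queue.
 */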
void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt, bool notify_hw)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	if (notify_hw)
		mlx5e_tx_notify_hw(sq, wqe, 0);
}

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make kernel call mlx5e_hash_init after the random stack finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif

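/*
 * Select the send queue for an outgoing mbuf. The traffic class comes
 * from the VLAN PCP bits (the top three bits of the tag) when a tag is
 * present, and the channel from the flowid computed by the stack,
 * falling back to a software L3/L4 hash of the headers when no flowid
 * is available.
 */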
static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	u32 ch;
	u32 tc;

	/* check if channels are successfully opened */
	if (unlikely(priv->channel == NULL))
		return (NULL);

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash not present in stable, so just
		 * throw unhashed mbufs on queue 0
		 */
		ch = 0;
#endif
	}

	/* check if channel is allocated */
	if (unlikely(priv->channel[ch] == NULL))
		return (NULL);

	return (&priv->channel[ch]->sq[tc]);
}

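/*
 * Decide how many initial bytes of the frame are copied directly into
 * the WQE. Inlining is capped at MLX5E_MAX_TX_INLINE and never reaches
 * past the first mbuf of the chain.
 */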
static inline u16
mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, struct mbuf *mb)
{
	return (MIN(MLX5E_MAX_TX_INLINE, mb->m_len));
}

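/*
 * Compute the combined Ethernet + IP(v6) + TCP header length for TSO.
 * A return value of zero means the headers are not contiguous in the
 * first mbuf or the packet is not TCP, in which case LSO cannot be
 * used.
 */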
static int
mlx5e_get_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	if (mb->m_len < eth_hdr_len)
		return (0);
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	if (mb->m_len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}

/*
 * The return value is not going back to the stack because of
 * the drbr
 */
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/*
	 * Return ENOBUFS if the queue is full, this may trigger reinsertion
	 * of the mbuf into the drbr (see mlx5e_xmit_locked)
	 */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
		return (ENOBUFS);
	}

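	/*
	 * Twice the maximum WQE size is checked above because, in the
	 * worst case, the edge alignment below consumes one maximum-sized
	 * NOP before the actual WQE is posted.
	 */
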
	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS, false);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
			m_freem(mb);
			*mbp = NULL;	/* mbuf was freed; tell the caller not to put it back */
			return (ENOMEM);
		}
	}

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->channel->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_header_size(mb);
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;
		ihs = mlx5e_get_inline_hdr_size(sq, mb);
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}
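
	/*
	 * At this point sq->mbuf[pi].num_bytes holds the wire-level byte
	 * count: for LSO the payload plus one replicated header per
	 * generated segment, otherwise at least the minimum Ethernet
	 * frame size minus the CRC. A value of zero is reserved for NOPs
	 * (see mlx5e_poll_tx_cq).
	 */
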
	if (mb->m_flags & M_VLANTAG) {
		struct ether_vlan_header *eh =
		    (struct ether_vlan_header *)wqe->eth.inline_hdr_start;

		/* Range checks */
		if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
			ihs = (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN);
		else if (ihs < ETHER_HDR_LEN) {
			err = EINVAL;
			goto tx_drop;
		}
		m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
		m_adj(mb, ETHER_HDR_LEN);
		/* Insert 4 bytes VLAN tag into data stream */
		eh->evl_proto = eh->evl_encap_proto;
		eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
		/* Copy rest of header data, if any */
		m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
		m_adj(mb, ihs - ETHER_HDR_LEN);
		/* Extend header by 4 bytes */
		ihs += ETHER_VLAN_ENCAP_LEN;
	} else {
		m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
		m_adj(mb, ihs);
	}
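
	/*
	 * The header bytes copied into inline_hdr_start were also trimmed
	 * from the mbuf chain via m_adj(), so they travel inside the WQE
	 * itself and are excluded from the DMA mapping below. In the VLAN
	 * case the tag is re-inserted by hand because it only exists in
	 * the mbuf packet header, not in the data stream.
	 */
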
	wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (likely(ihs > sizeof(wqe->eth.inline_hdr_start))) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;
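
	/*
	 * ds_cnt counts the 16-byte segments taken up by the control,
	 * eth and inline header portion of the WQE; dseg now points at
	 * the first free data segment slot right behind the inlined
	 * headers.
	 */
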
	/* Trim off empty mbufs */
	while (mb->m_len == 0) {
		mb = m_free(mb);
		/* Check if all data has been inlined */
		if (mb == NULL)
			goto skip_dma;
	}

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/*
		 * Update *mbp before defrag in case it was trimmed in the
		 * loop above
		 */
		*mbp = mb;
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;

	*mbp = mb;

	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}
skip_dma:
	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	/* Make sure all mbuf data is written to RAM */
	if (mb != NULL)
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);

	mlx5e_tx_notify_hw(sq, wqe, 0);

	sq->stats.packets++;
	return (0);

tx_drop:
	sq->stats.dropped++;
	*mbp = NULL;
	m_freem(mb);
	return (err);
}

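/*
 * Reclaim completed transmissions. Every WQE is posted with
 * MLX5_WQE_CTRL_CQ_UPDATE set, so each completion queue entry accounts
 * for exactly one WQE, and the number of WQEBBs recorded at post time
 * tells how far the consumer counter advances.
 */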
static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	while (budget--) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (cqe == NULL)
			break;

		ci = sqcc & sq->wq.sz_m1;
		mb = sq->mbuf[ci].mbuf;
		sq->mbuf[ci].mbuf = NULL;	/* Safety clear */

		if (mb == NULL) {
			if (sq->mbuf[ci].num_bytes == 0) {
				/* NOP */
				sq->stats.nop++;
			}
		} else {
			bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

			/* Free transmitted mbuf */
			m_freem(mb);
		}
		sqcc += sq->mbuf[ci].num_wqebbs;
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc = sqcc;

	if (atomic_cmpset_int(&sq->queue_state, MLX5E_SQ_FULL, MLX5E_SQ_READY))
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
}

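/*
 * Enqueue a frame on the buf ring and drain as much of the ring as the
 * hardware queue accepts. Expects sq->lock to be held by the caller.
 */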
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct mbuf *next;
	int err = 0;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		if (mb != NULL)
			err = drbr_enqueue(ifp, sq->br, mb);
		return (err);
	}

	if (mb != NULL)
		/*
		 * If we can't insert mbuf into drbr, try to xmit anyway.
		 * We keep the error we got so we could return that after xmit.
		 */
		err = drbr_enqueue(ifp, sq->br, mb);

	/* Process the queue */
	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
		if (mlx5e_sq_xmit(sq, &next) != 0) {
			if (next == NULL) {
				/* The mbuf was consumed or freed by the xmit path */
				drbr_advance(ifp, sq->br);
			} else {
				/* The queue is full; put the mbuf back */
				drbr_putback(ifp, sq->br, next);
				atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_FULL);
			}
			break;
		}
		drbr_advance(ifp, sq->br);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}
	return (err);
}

int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	sq = mlx5e_select_queue(ifp, mb);
	if (unlikely(sq == NULL)) {
		/* Invalid send queue */
		m_freem(mb);
		return (ENXIO);
	}
	if (mtx_trylock(&sq->lock)) {
		ret = mlx5e_xmit_locked(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else {
		ret = drbr_enqueue(ifp, sq->br, mb);
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
	}

	return (ret);
}

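/*
 * Note that the trylock above keeps the transmit path from blocking:
 * when another thread already owns the send queue, the mbuf is parked
 * on the buf ring and the taskqueue (mlx5e_tx_que) drains it later.
 */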
void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq);
	mtx_unlock(&sq->comp_lock);
}

void
mlx5e_tx_que(void *context, int pending)
{
	struct mlx5e_sq *sq = context;
	struct ifnet *ifp = sq->channel->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sq->lock);
		if (!drbr_empty(ifp, sq->br))
			mlx5e_xmit_locked(ifp, sq, NULL);
		mtx_unlock(&sq->lock);
	}
}