1 /*************************************************************************
2 Copyright (c) 2003-2007 Cavium Networks (support@cavium.com). All rights
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are
10 * Redistributions of source code must retain the above copyright
11 notice, this list of conditions and the following disclaimer.
13 * Redistributions in binary form must reproduce the above
14 copyright notice, this list of conditions and the following
15 disclaimer in the documentation and/or other materials provided
16 with the distribution.
18 * Neither the name of Cavium Networks nor the names of
19 its contributors may be used to endorse or promote products
20 derived from this software without specific prior written
23 This Software, including technical data, may be subject to U.S. export control laws, including the U.S. Export Administration Act and its associated regulations, and may be subject to export or import regulations in other countries.
25 TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
26 AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
28 *************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
39 #include <sys/socket.h>
42 #include <net/ethernet.h>
45 #include "wrapper-cvmx-includes.h"
46 #include "ethernet-headers.h"
/* You can define GET_MBUF_QOS() to override how the mbuf output function
   determines which output queue is used.  The default implementation
   always uses the base queue for the port.  If, for example, you wanted
   to use the m->priority field, define GET_MBUF_QOS as:
   #define GET_MBUF_QOS(m) ((m)->priority) */
#define GET_MBUF_QOS(m) 0	/* Default: always use the port's base queue (QoS 0). */
61 * @param m Packet to send
62 * @param dev Device info structure
63 * @return Always returns zero
65 int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
67 cvmx_pko_command_word0_t pko_command;
68 cvmx_buf_ptr_t hw_buffer;
71 cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
73 int32_t buffers_to_free;
76 /* Prefetch the private data structure.
77 It is larger that one cache line */
78 CVMX_PREFETCH(priv, 0);
80 /* Start off assuming no drop */
83 /* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
84 remove "qos" in the event neither interface supports multiple queues
86 if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
87 (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
88 qos = GET_MBUF_QOS(m);
91 else if (qos >= cvmx_pko_get_num_queues(priv->port))
96 /* The CN3XXX series of parts has an errata (GMX-401) which causes the
97 GMX block to hang if a collision occurs towards the end of a
98 <68 byte packet. As a workaround for this, we pad packets to be
99 68 bytes whenever we are in half duplex mode. We don't handle
100 the case of having a small packet but no room to add the padding.
101 The kernel should always give us at least a cache line */
102 if (__predict_false(m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
103 cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
104 int interface = INTERFACE(priv->port);
105 int index = INDEX(priv->port);
108 /* We only need to pad packet in half duplex mode */
109 gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
110 if (gmx_prt_cfg.s.duplex == 0) {
111 static uint8_t pad[64];
113 if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
114 printf("%s: unable to padd small packet.", __func__);
120 * If the packet is not fragmented.
122 if (m->m_pkthdr.len == m->m_len) {
123 /* Build the PKO buffer pointer */
125 hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
126 hw_buffer.s.pool = 0;
127 hw_buffer.s.size = m->m_len;
129 /* Build the PKO command */
131 pko_command.s.segs = 1;
132 pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA. */
141 * The packet is fragmented, we need to send a list of segments
142 * in memory we borrow from the WQE pool.
144 work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
152 gp = (uint64_t *)work;
153 for (n = m; n != NULL; n = n->m_next) {
154 if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t))
155 panic("%s: too many segments in packet; call m_collapse().", __func__);
157 /* Build the PKO buffer pointer */
159 hw_buffer.s.i = 1; /* Do not put this buffer into the FPA. */
160 hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data);
161 hw_buffer.s.pool = 0;
162 hw_buffer.s.size = n->m_len;
164 *gp++ = hw_buffer.u64;
168 /* Build the PKO buffer gather list pointer */
170 hw_buffer.s.addr = cvmx_ptr_to_phys(work);
171 hw_buffer.s.pool = CVMX_FPA_WQE_POOL;
172 hw_buffer.s.size = segs;
174 /* Build the PKO command */
176 pko_command.s.segs = segs;
177 pko_command.s.gather = 1;
178 pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA. */
181 /* Finish building the PKO command */
182 pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
183 pko_command.s.reg0 = priv->fau+qos*4;
184 pko_command.s.total_bytes = m->m_pkthdr.len;
185 pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
186 pko_command.s.subone0 = 1;
188 /* Check if we can use the hardware checksumming */
189 if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
190 /* Use hardware checksum calc */
191 pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
196 * Could use a different free queue (and different FAU address) per
197 * core instead of per QoS, to reduce contention here.
199 IF_LOCK(&priv->tx_free_queue[qos]);
200 /* Get the number of mbufs in use by the hardware */
201 in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1);
202 buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
204 cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE);
206 /* Drop this packet if we have too many already queued to the HW */
207 if (_IF_QFULL(&priv->tx_free_queue[qos])) {
210 /* Send the packet to the output queue */
212 if (__predict_false(cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
213 DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
217 if (__predict_false(dropped)) {
219 cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
222 /* Put this packet on the queue to be freed later */
223 _IF_ENQUEUE(&priv->tx_free_queue[qos], m);
225 /* Pass it to any BPF listeners. */
226 ETHER_BPF_MTAP(ifp, m);
229 ifp->if_obytes += m->m_pkthdr.len;
232 /* Free mbufs not in use by the hardware */
233 if (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
234 while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
235 _IF_DEQUEUE(&priv->tx_free_queue[qos], m);
239 IF_UNLOCK(&priv->tx_free_queue[qos]);
246 * This function frees all mbufs that are currenty queued for TX.
248 * @param dev Device being shutdown
250 void cvm_oct_tx_shutdown(struct ifnet *ifp)
252 cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
255 for (qos = 0; qos < 16; qos++) {
256 IF_DRAIN(&priv->tx_free_queue[qos]);