1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
49 * Support functions for managing the MII management port
51 * <hr>$Revision: 49628 $<hr>
54 #include "cvmx-bootmem.h"
55 #include "cvmx-spinlock.h"
56 #include "cvmx-mdio.h"
57 #include "cvmx-mgmt-port.h"
58 #include "cvmx-sysinfo.h"
59 #include "cvmx-error.h"
/* Enum of MIX interface modes */
CVMX_MGMT_PORT_NONE = 0,        /* Port has not been configured */
CVMX_MGMT_PORT_MII_MODE,        /* Interface runs in MII mode */
CVMX_MGMT_PORT_RGMII_MODE,      /* Interface runs in RGMII mode (selected on CN6XXX, see initialize) */
} cvmx_mgmt_port_mode_t;
/* Format of the TX/RX ring buffer entries */
uint64_t reserved_62_63 : 2;
uint64_t len : 14;    /* Length of the buffer/packet in bytes */
uint64_t tstamp : 1;  /* For TX, signals that the packet should be timestamped */
uint64_t code : 7;    /* The RX error code (receive path treats 15 as final-good, 16 as more-data-follows) */
uint64_t addr : 40;   /* Physical address of the buffer */
} cvmx_mgmt_port_ring_entry_t;
/* Per port state required for each mgmt port */
cvmx_spinlock_t lock;    /* Used for exclusive access to this structure */
int tx_write_index;      /* Where the next TX will write in the tx_ring and tx_buffers */
int rx_read_index;       /* Where the next RX will be in the rx_ring and rx_buffers */
int port;                /* Port to use. (This is the 'fake' IPD port number) */
uint64_t mac;            /* Our MAC address */
cvmx_mgmt_port_ring_entry_t tx_ring[CVMX_MGMT_PORT_NUM_TX_BUFFERS];  /* TX descriptor ring given to the MIX HW */
cvmx_mgmt_port_ring_entry_t rx_ring[CVMX_MGMT_PORT_NUM_RX_BUFFERS];  /* RX descriptor ring given to the MIX HW */
char tx_buffers[CVMX_MGMT_PORT_NUM_TX_BUFFERS][CVMX_MGMT_PORT_TX_BUFFER_SIZE];  /* Backing storage for TX descriptors */
char rx_buffers[CVMX_MGMT_PORT_NUM_RX_BUFFERS][CVMX_MGMT_PORT_RX_BUFFER_SIZE];  /* Backing storage for RX descriptors */
cvmx_mgmt_port_mode_t mode; /* Mode of the interface */
} cvmx_mgmt_port_state_t;
/* Pointer to the array of per-port state (one entry per MIX port). It lives
   in a named bootmem block so every application sharing the chip maps the
   same state (see cvmx_mgmt_port_initialize). */
CVMX_SHARED cvmx_mgmt_port_state_t *cvmx_mgmt_port_state_ptr = NULL;
/**
 * Return the number of management ports supported by this chip
 *
 * @return Number of ports
 */
static int __cvmx_mgmt_port_num_ports(void)
    /* CN56XX, CN52XX and CN63XX are the models with MIX management ports */
    if (OCTEON_IS_MODEL(OCTEON_CN56XX))
    else if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
127 * Called to initialize a management port for use. Multiple calls
128 * to this function across applications is safe.
130 * @param port Port to initialize
132 * @return CVMX_MGMT_PORT_SUCCESS or an error code
cvmx_mgmt_port_result_t cvmx_mgmt_port_initialize(int port)
    char *alloc_name = "cvmx_mgmt_port";
    cvmx_mixx_oring1_t oring1;
    cvmx_mixx_ctl_t mix_ctl;

    /* Reject MIX port numbers this chip doesn't have */
    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    /* Allocate state for all ports from a named bootmem block so every
       application sharing the chip finds the same structure */
    cvmx_mgmt_port_state_ptr = cvmx_bootmem_alloc_named(CVMX_MGMT_PORT_NUM_PORTS * sizeof(cvmx_mgmt_port_state_t), 128, alloc_name);
    if (cvmx_mgmt_port_state_ptr)
        memset(cvmx_mgmt_port_state_ptr, 0, CVMX_MGMT_PORT_NUM_PORTS * sizeof(cvmx_mgmt_port_state_t));
    /* Allocation failed, presumably because another application already
       created the named block - look it up by name instead */
    const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block(alloc_name);
    cvmx_mgmt_port_state_ptr = cvmx_phys_to_ptr(block_desc->base_addr);
    cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Unable to get named block %s on MIX%d.\n", alloc_name, port);
    return CVMX_MGMT_PORT_NO_MEMORY;

    /* Reset the MIX block if the previous user had a different TX ring size, or if
    ** we allocated a new (and blank) state structure. */
    mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
    if (!mix_ctl.s.reset)
        oring1.u64 = cvmx_read_csr(CVMX_MIXX_ORING1(port));
        if (oring1.s.osize != CVMX_MGMT_PORT_NUM_TX_BUFFERS || cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)
            mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
            cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
            /* Poll until the MIX block reports not busy */
            mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
            } while (mix_ctl.s.busy);
            cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
            cvmx_read_csr(CVMX_MIXX_CTL(port)); /* Read back to force the write out */
            memset(cvmx_mgmt_port_state_ptr + port, 0, sizeof(cvmx_mgmt_port_state_t));

    /* A zero first TX ring entry means this port has never been set up */
    if (cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)
        cvmx_mgmt_port_state_t *state = cvmx_mgmt_port_state_ptr + port;
        cvmx_mixx_bist_t mix_bist;
        cvmx_agl_gmx_bist_t agl_gmx_bist;
        cvmx_mixx_oring1_t oring1;
        cvmx_mixx_iring1_t iring1;
        cvmx_mixx_ctl_t mix_ctl;
        cvmx_agl_prtx_ctl_t agl_prtx_ctl;

        /* Make sure BIST passed */
        mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(port));
        cvmx_dprintf("WARNING: cvmx_mgmt_port_initialize: Managment port MIX failed BIST (0x%016llx) on MIX%d\n", CAST64(mix_bist.u64), port);
        agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
        if (agl_gmx_bist.u64)
            cvmx_dprintf("WARNING: cvmx_mgmt_port_initialize: Managment port AGL failed BIST (0x%016llx) on MIX%d\n", CAST64(agl_gmx_bist.u64), port);

        /* Clear all state information */
        memset(state, 0, sizeof(*state));

        /* Take the control logic out of reset */
        mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
        cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

        /* Read until reset == 0. Timeout should never happen... */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_MIXX_CTL(port), cvmx_mixx_ctl_t, reset, ==, 0, 300000000))
            cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Timeout waiting for MIX(%d) reset.\n", port);
            return CVMX_MGMT_PORT_INIT_ERROR;

        /* Set the PHY address and mode of the interface (RGMII/MII mode). */
        if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
            /* Simulator has no PHY; assume MII */
            state->mode = CVMX_MGMT_PORT_MII_MODE;
            int port_num = CVMX_HELPER_BOARD_MGMT_IPD_PORT + port;
            int phy_addr = cvmx_helper_board_get_mii_address(port_num);
            cvmx_mdio_phy_reg_status_t phy_status;
            /* Read PHY status register to find the mode of the interface.
               phy_addr packs the MDIO bus in the upper byte and the PHY id
               in the lower byte. */
            phy_status.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_STATUS);
            if (phy_status.s.capable_extended_status == 0) // MII mode
                state->mode = CVMX_MGMT_PORT_MII_MODE;
            else if (OCTEON_IS_MODEL(OCTEON_CN6XXX)
                && phy_status.s.capable_extended_status) // RGMII mode
                state->mode = CVMX_MGMT_PORT_RGMII_MODE;
                state->mode = CVMX_MGMT_PORT_NONE;
            cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Not able to read the PHY on MIX%d\n", port);
            return CVMX_MGMT_PORT_INVALID_PARAM;
            state->port = port_num;

        /* All interfaces should be configured in same mode */
        for (i = 0; i < __cvmx_mgmt_port_num_ports(); i++)
            && cvmx_mgmt_port_state_ptr[i].mode != CVMX_MGMT_PORT_NONE
            && cvmx_mgmt_port_state_ptr[i].mode != state->mode)
                cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: All ports in MIX interface are not configured in same mode.\n \
Port %d is configured as %d\n \
And Port %d is configured as %d\n", port, state->mode, i, cvmx_mgmt_port_state_ptr[i].mode);
                return CVMX_MGMT_PORT_INVALID_PARAM;

        /* Create a default MAC address */
        state->mac = 0x000000dead000000ull;
        state->mac += 0xffffff & CAST64(state); /* Mix in the state address so ports differ */

        /* Setup the TX ring */
        for (i=0; i<CVMX_MGMT_PORT_NUM_TX_BUFFERS; i++)
            state->tx_ring[i].s.len = CVMX_MGMT_PORT_TX_BUFFER_SIZE;
            state->tx_ring[i].s.addr = cvmx_ptr_to_phys(state->tx_buffers[i]);

        /* Tell the HW where the TX ring is */
        oring1.s.obase = cvmx_ptr_to_phys(state->tx_ring)>>3;
        oring1.s.osize = CVMX_MGMT_PORT_NUM_TX_BUFFERS;
        cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

        /* Setup the RX ring */
        for (i=0; i<CVMX_MGMT_PORT_NUM_RX_BUFFERS; i++)
            /* This size is -8 due to an errata for CN56XX pass 1 */
            state->rx_ring[i].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
            state->rx_ring[i].s.addr = cvmx_ptr_to_phys(state->rx_buffers[i]);

        /* Tell the HW where the RX ring is */
        iring1.s.ibase = cvmx_ptr_to_phys(state->rx_ring)>>3;
        iring1.s.isize = CVMX_MGMT_PORT_NUM_RX_BUFFERS;
        cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
        cvmx_write_csr(CVMX_MIXX_IRING2(port), CVMX_MGMT_PORT_NUM_RX_BUFFERS);

        /* Disable the external input/output */
        cvmx_mgmt_port_disable(port);

        /* Set the MAC address filtering up */
        cvmx_mgmt_port_set_mac(port, state->mac);

        /* Set the default max size to an MTU of 1500 with L2 and VLAN */
        cvmx_mgmt_port_set_max_packet_size(port, 1518);

        /* Enable the port HW. Packets are not allowed until cvmx_mgmt_port_enable() is called */
        mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
        mix_ctl.s.en = 1; /* Enable the port */
        mix_ctl.s.nbtarb = 0; /* Arbitration mode */
        mix_ctl.s.mrq_hwm = 1; /* MII CB-request FIFO programmable high watermark */
        cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

        /* Select the mode of operation for the interface. */
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
            agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
            if (state->mode == CVMX_MGMT_PORT_RGMII_MODE)
                agl_prtx_ctl.s.mode = 0;
            else if (state->mode == CVMX_MGMT_PORT_MII_MODE)
                agl_prtx_ctl.s.mode = 1;
                cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Invalid mode for MIX(%d)\n", port);
                return CVMX_MGMT_PORT_INVALID_PARAM;
            cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);

        /* Initialize the physical layer. */
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
            /* MII clocks counts are based on the 125Mhz reference, so our
               delays need to be scaled to match the core clock rate. The
               "+1" is to make sure rounding always waits a little too
               long. */
            uint64_t clock_scale = cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 125000000 + 1;

            /* Take the DLL and clock tree out of reset */
            agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
            agl_prtx_ctl.s.clkrst = 0;
            if (state->mode == CVMX_MGMT_PORT_RGMII_MODE) // RGMII Initialization
                agl_prtx_ctl.s.dllrst = 0;
                agl_prtx_ctl.s.clktx_byp = 0;
            cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
            cvmx_read_csr(CVMX_AGL_PRTX_CTL(port)); /* Force write out before wait */

            /* Wait for the DLL to lock. External 125 MHz reference clock must be stable at this point. */
            cvmx_wait(256 * clock_scale);

            /* The rest of the config is common between RGMII/MII */

            /* Enable the interface */
            agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
            agl_prtx_ctl.s.enable = 1;
            cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);

            /* Read the value back to force the previous write */
            agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));

            /* Enable the compensation controller */
            agl_prtx_ctl.s.comp = 1;
            cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
            cvmx_read_csr(CVMX_AGL_PRTX_CTL(port)); /* Force write out before wait */
            cvmx_wait(1024 * clock_scale); // Wait for compensation state to lock.
        else if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
            /* Force compensation values, as they are not determined properly by HW */
            cvmx_agl_gmx_drv_ctl_t drv_ctl;

            drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
            drv_ctl.s.byp_en1 = 1;
            drv_ctl.s.byp_en = 1;
            cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);

    /* Start reporting MIX errors for this port */
    cvmx_error_enable_group(CVMX_ERROR_GROUP_MGMT_PORT, port);
    return CVMX_MGMT_PORT_SUCCESS;
397 * Shutdown a management port. This currently disables packet IO
398 * but leaves all hardware and buffers. Another application can then
399 * call initialize() without redoing the hardware setup.
401 * @param port Management port
403 * @return CVMX_MGMT_PORT_SUCCESS or an error code
cvmx_mgmt_port_result_t cvmx_mgmt_port_shutdown(int port)
    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    /* Stop reporting MIX errors for this port */
    cvmx_error_disable_group(CVMX_ERROR_GROUP_MGMT_PORT, port);

    /* Stop packets from coming in */
    cvmx_mgmt_port_disable(port);

    /* We don't free any memory so the next initialize can reuse the HW setup */
    return CVMX_MGMT_PORT_SUCCESS;
421 * Enable packet IO on a management port
423 * @param port Management port
425 * @return CVMX_MGMT_PORT_SUCCESS or an error code
cvmx_mgmt_port_result_t cvmx_mgmt_port_enable(int port)
    cvmx_mgmt_port_state_t *state;
    cvmx_agl_gmx_inf_mode_t agl_gmx_inf_mode;
    cvmx_agl_gmx_rxx_frm_ctl_t rxx_frm_ctl;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    /* Configure RX frame checking/preamble handling */
    rxx_frm_ctl.s.pre_align = 1;
    rxx_frm_ctl.s.pad_len = 1; /* When set, disables the length check for non-min sized pkts with padding in the client data */
    rxx_frm_ctl.s.vlan_len = 1; /* When set, disables the length check for VLAN pkts */
    rxx_frm_ctl.s.pre_free = 1; /* When set, PREAMBLE checking is less strict */
    rxx_frm_ctl.s.ctl_smac = 0; /* Control Pause Frames can match station SMAC */
    rxx_frm_ctl.s.ctl_mcst = 1; /* Control Pause Frames can match globally assign Multicast address */
    rxx_frm_ctl.s.ctl_bck = 1; /* Forward pause information to TX block */
    rxx_frm_ctl.s.ctl_drp = 1; /* Drop Control Pause Frames */
    rxx_frm_ctl.s.pre_strp = 1; /* Strip off the preamble */
    rxx_frm_ctl.s.pre_chk = 1; /* This port is configured to send PREAMBLE+SFD to begin every frame. GMX checks that the PREAMBLE is sent correctly */
    cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

    /* Enable the AGL block */
    if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        agl_gmx_inf_mode.u64 = 0;
        agl_gmx_inf_mode.s.en = 1;
        cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

    /* Configure the port duplex and enables */
    cvmx_mgmt_port_link_set(port, cvmx_mgmt_port_link_get(port));

    cvmx_spinlock_unlock(&state->lock);
    return CVMX_MGMT_PORT_SUCCESS;
470 * Disable packet IO on a management port
472 * @param port Management port
474 * @return CVMX_MGMT_PORT_SUCCESS or an error code
cvmx_mgmt_port_result_t cvmx_mgmt_port_disable(int port)
    cvmx_mgmt_port_state_t *state;
    cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    /* Clear the port enable bit; the rest of the HW setup is preserved
       so a later cvmx_mgmt_port_enable() can resume traffic */
    agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
    agl_gmx_prtx.s.en = 0;
    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

    cvmx_spinlock_unlock(&state->lock);
    return CVMX_MGMT_PORT_SUCCESS;
498 * Send a packet out the management port. The packet is copied so
499 * the input buffer isn't used after this call.
501 * @param port Management port
502 * @param packet_len Length of the packet to send. It does not include the final CRC
503 * @param buffer Packet data
505 * @return CVMX_MGMT_PORT_SUCCESS or an error code
cvmx_mgmt_port_result_t cvmx_mgmt_port_send(int port, int packet_len, void *buffer)
    cvmx_mgmt_port_state_t *state;
    cvmx_mixx_oring2_t mix_oring2;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    /* Make sure the packet size is valid */
    if ((packet_len < 1) || (packet_len > CVMX_MGMT_PORT_TX_BUFFER_SIZE))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    /* presumably guards an invalid (NULL) buffer pointer - TODO confirm */
    return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    /* The outbound doorbell count tracks descriptors the HW still owns;
       keep one ring entry free so full and empty are distinguishable */
    mix_oring2.u64 = cvmx_read_csr(CVMX_MIXX_ORING2(port));
    if (mix_oring2.s.odbell >= CVMX_MGMT_PORT_NUM_TX_BUFFERS - 1)
        /* No room for another packet */
        cvmx_spinlock_unlock(&state->lock);
        return CVMX_MGMT_PORT_NO_MEMORY;

    /* Copy the packet into the output buffer */
    memcpy(state->tx_buffers[state->tx_write_index], buffer, packet_len);
    /* Insert the source MAC */
    memcpy(state->tx_buffers[state->tx_write_index] + 6, ((char*)&state->mac) + 2, 6);
    /* Update the TX ring buffer entry size */
    state->tx_ring[state->tx_write_index].s.len = packet_len;
    /* This code doesn't support TX timestamps */
    state->tx_ring[state->tx_write_index].s.tstamp = 0;
    /* Increment our TX index */
    state->tx_write_index = (state->tx_write_index + 1) % CVMX_MGMT_PORT_NUM_TX_BUFFERS;
    /* Ring the doorbell, sending the packet */
    cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
    /* Clear the completed-TX count by writing it back (presumably
       write-to-clear - verify against the MIX register spec) */
    if (cvmx_read_csr(CVMX_MIXX_ORCNT(port)))
        cvmx_write_csr(CVMX_MIXX_ORCNT(port), cvmx_read_csr(CVMX_MIXX_ORCNT(port)));

    cvmx_spinlock_unlock(&state->lock);
    return CVMX_MGMT_PORT_SUCCESS;
557 #if defined(__FreeBSD__)
559 * Send a packet out the management port. The packet is copied so
560 * the input mbuf isn't used after this call.
562 * @param port Management port
563 * @param m Packet mbuf (with pkthdr)
565 * @return CVMX_MGMT_PORT_SUCCESS or an error code
cvmx_mgmt_port_result_t cvmx_mgmt_port_sendm(int port, const struct mbuf *m)
    cvmx_mgmt_port_state_t *state;
    cvmx_mixx_oring2_t mix_oring2;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    /* Make sure the packet size is valid */
    if ((m->m_pkthdr.len < 1) || (m->m_pkthdr.len > CVMX_MGMT_PORT_TX_BUFFER_SIZE))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    /* Keep one ring entry free so full and empty are distinguishable */
    mix_oring2.u64 = cvmx_read_csr(CVMX_MIXX_ORING2(port));
    if (mix_oring2.s.odbell >= CVMX_MGMT_PORT_NUM_TX_BUFFERS - 1)
        /* No room for another packet */
        cvmx_spinlock_unlock(&state->lock);
        return CVMX_MGMT_PORT_NO_MEMORY;

    /* Copy the packet into the output buffer. Unlike cvmx_mgmt_port_send,
       the source MAC is taken from the mbuf as-is. */
    m_copydata(m, 0, m->m_pkthdr.len, state->tx_buffers[state->tx_write_index]);
    /* Update the TX ring buffer entry size */
    state->tx_ring[state->tx_write_index].s.len = m->m_pkthdr.len;
    /* This code doesn't support TX timestamps */
    state->tx_ring[state->tx_write_index].s.tstamp = 0;
    /* Increment our TX index */
    state->tx_write_index = (state->tx_write_index + 1) % CVMX_MGMT_PORT_NUM_TX_BUFFERS;
    /* Ring the doorbell, sending the packet */
    cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
    /* Clear the completed-TX count by writing it back (presumably
       write-to-clear - verify against the MIX register spec) */
    if (cvmx_read_csr(CVMX_MIXX_ORCNT(port)))
        cvmx_write_csr(CVMX_MIXX_ORCNT(port), cvmx_read_csr(CVMX_MIXX_ORCNT(port)));

    cvmx_spinlock_unlock(&state->lock);
    return CVMX_MGMT_PORT_SUCCESS;
614 * Receive a packet from the management port.
616 * @param port Management port
617 * @param buffer_len Size of the buffer to receive the packet into
618 * @param buffer Buffer to receive the packet into
* @return The size of the packet, or a negative error code on failure. Zero
621 * means that no packets were available.
int cvmx_mgmt_port_receive(int port, int buffer_len, uint8_t *buffer)
    cvmx_mixx_ircnt_t mix_ircnt;
    cvmx_mgmt_port_state_t *state;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    /* Make sure the buffer size is valid */
    return CVMX_MGMT_PORT_INVALID_PARAM;

    /* presumably rejects a NULL buffer pointer - TODO confirm */
    return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    /* Find out how many RX packets are pending */
    mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
    if (mix_ircnt.s.ircnt)
        uint64_t *source = (void *)state->rx_buffers[state->rx_read_index];
        uint64_t *zero_check = source;
        /* CN56XX pass 1 has an errata where packets might start 8 bytes
           into the buffer instead of at their correct location. If the
           first 8 bytes is zero we assume this has happened */
        if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && (*zero_check == 0))
        /* Start off with zero bytes received */
        /* While the completion code signals more data (code 16), copy the
           buffers into the user's data */
        while (state->rx_ring[state->rx_read_index].s.code == 16)
            /* Only copy what will fit in the user's buffer */
            int length = state->rx_ring[state->rx_read_index].s.len;
            if (length > buffer_len)
            memcpy(buffer, source, length);
            /* Reduce the size of the buffer to the remaining space. If we run
               out we will signal an error when the code 15 buffer doesn't fit */
            buffer_len -= length;

            /* Update this buffer for reuse in future receives. This size is
               -8 due to an errata for CN56XX pass 1 */
            state->rx_ring[state->rx_read_index].s.code = 0;
            state->rx_ring[state->rx_read_index].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
            state->rx_read_index = (state->rx_read_index + 1) % CVMX_MGMT_PORT_NUM_RX_BUFFERS;
            /* Zero the beginning of the buffer for use by the errata check */
            /* Increment the number of RX buffers */
            cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
            source = (void *)state->rx_buffers[state->rx_read_index];

        /* Check for the final good completion code (15) */
        if (state->rx_ring[state->rx_read_index].s.code == 15)
            if (buffer_len >= state->rx_ring[state->rx_read_index].s.len)
                int length = state->rx_ring[state->rx_read_index].s.len;
                memcpy(buffer, source, length);
                /* Not enough room for the packet */
                cvmx_dprintf("ERROR: cvmx_mgmt_port_receive: Packet (%d) larger than supplied buffer (%d)\n", state->rx_ring[state->rx_read_index].s.len, buffer_len);
                result = CVMX_MGMT_PORT_NO_MEMORY;
            /* Any other completion code is an RX error: report and drop */
            cvmx_dprintf("ERROR: cvmx_mgmt_port_receive: Receive error code %d. Packet dropped(Len %d), \n",
                state->rx_ring[state->rx_read_index].s.code, state->rx_ring[state->rx_read_index].s.len + result);
            result = -state->rx_ring[state->rx_read_index].s.code;

            /* Check to see if we need to change the duplex. */
            cvmx_mgmt_port_link_set(port, cvmx_mgmt_port_link_get(port));

        /* Clean out the ring buffer entry. This size is -8 due to an errata
           for CN56XX pass 1 */
        state->rx_ring[state->rx_read_index].s.code = 0;
        state->rx_ring[state->rx_read_index].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
        state->rx_read_index = (state->rx_read_index + 1) % CVMX_MGMT_PORT_NUM_RX_BUFFERS;
        /* Zero the beginning of the buffer for use by the errata check */
        /* Increment the number of RX buffers */
        cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
        /* Decrement the pending RX count */
        cvmx_write_csr(CVMX_MIXX_IRCNT(port), 1);
        /* No packets available */

    cvmx_spinlock_unlock(&state->lock);
734 * Set the MAC address for a management port
736 * @param port Management port
737 * @param mac New MAC address. The lower 6 bytes are used.
739 * @return CVMX_MGMT_PORT_SUCCESS or an error code
cvmx_mgmt_port_result_t cvmx_mgmt_port_set_mac(int port, uint64_t mac)
    cvmx_mgmt_port_state_t *state;
    cvmx_agl_gmx_rxx_adr_ctl_t agl_gmx_rxx_adr_ctl;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    agl_gmx_rxx_adr_ctl.u64 = 0;
    agl_gmx_rxx_adr_ctl.s.cam_mode = 1; /* Only accept matching MAC addresses */
    agl_gmx_rxx_adr_ctl.s.mcst = 0; /* Drop multicast */
    agl_gmx_rxx_adr_ctl.s.bcst = 1; /* Allow broadcast */
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), agl_gmx_rxx_adr_ctl.u64);

    /* Only using one of the CAMs. Each CAM register gets one byte of the
       48-bit MAC, most significant byte first. */
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), (mac >> 40) & 0xff);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), (mac >> 32) & 0xff);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), (mac >> 24) & 0xff);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), (mac >> 16) & 0xff);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), (mac >> 8) & 0xff);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), (mac >> 0) & 0xff);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 1);

    cvmx_spinlock_unlock(&state->lock);
    return CVMX_MGMT_PORT_SUCCESS;
775 * Get the MAC address for a management port
777 * @param port Management port
779 * @return MAC address
uint64_t cvmx_mgmt_port_get_mac(int port)
    /* NOTE(review): on a bad port this returns an error enum widened to
       uint64_t, which a caller could mistake for a MAC address */
    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    return cvmx_mgmt_port_state_ptr[port].mac;
790 * Set the multicast list.
792 * @param port Management port
793 * @param flags Interface flags
void cvmx_mgmt_port_set_multicast_list(int port, int flags)
    cvmx_mgmt_port_state_t *state;
    cvmx_agl_gmx_rxx_adr_ctl_t agl_gmx_rxx_adr_ctl;

    /* Silently ignore invalid ports (void return) */
    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    agl_gmx_rxx_adr_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port));

    /* Allow broadcast MAC addresses */
    if (!agl_gmx_rxx_adr_ctl.s.bcst)
        agl_gmx_rxx_adr_ctl.s.bcst = 1;

    if ((flags & CVMX_IFF_ALLMULTI) || (flags & CVMX_IFF_PROMISC))
        agl_gmx_rxx_adr_ctl.s.mcst = 2; /* Force accept multicast packets */
        agl_gmx_rxx_adr_ctl.s.mcst = 1; /* Force reject multicast packets */

    if (flags & CVMX_IFF_PROMISC)
        agl_gmx_rxx_adr_ctl.s.cam_mode = 0; /* Reject matches if promisc. Since CAM is shut off, should accept everything */
        agl_gmx_rxx_adr_ctl.s.cam_mode = 1; /* Filter packets based on the CAM */

    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), agl_gmx_rxx_adr_ctl.u64);

    /* Disable the CAM entirely in promiscuous mode */
    if (flags & CVMX_IFF_PROMISC)
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 0);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 1);

    cvmx_spinlock_unlock(&state->lock);
837 * Set the maximum packet allowed in. Size is specified
* including L2 but without FCS. A normal MTU would correspond
839 * to 1514 assuming the standard 14 byte L2 header.
841 * @param port Management port
842 * @param size_without_fcs
843 * Size in bytes without FCS
void cvmx_mgmt_port_set_max_packet_size(int port, int size_without_fcs)
    cvmx_mgmt_port_state_t *state;

    /* Silently ignore invalid ports (void return) */
    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
    /* Jabber threshold must be a multiple of 8: round up */
    cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), (size_without_fcs+7) & 0xfff8);
    cvmx_spinlock_unlock(&state->lock);
861 * Return the link state of an RGMII/MII port as returned by
862 * auto negotiation. The result of this function may not match
863 * Octeon's link config if auto negotiation has changed since
864 * the last call to cvmx_mgmt_port_link_set().
866 * @param port The RGMII/MII interface port to query
cvmx_helper_link_info_t cvmx_mgmt_port_link_get(int port)
    cvmx_mgmt_port_state_t *state;
    cvmx_helper_link_info_t result;

    /* NOTE(review): state is computed before the range check below, and the
       check uses '>' rather than '>=' - both look suspicious; confirm */
    state = cvmx_mgmt_port_state_ptr + port;

    if (port > __cvmx_mgmt_port_num_ports())
        cvmx_dprintf("WARNING: Invalid port %d\n", port);

    /* A real board has a PHY; ask the board helper for the link state */
    if (state->port != -1)
        return __cvmx_helper_board_link_get(state->port);
    else // Simulator does not have PHY, use some defaults.
        result.s.full_duplex = 1;
        result.s.link_up = 1;
        result.s.speed = 100;
897 * Configure RGMII/MII port for the specified link state. This
898 * function does not influence auto negotiation at the PHY level.
900 * @param port RGMII/MII interface port
901 * @param link_info The new link state
903 * @return Zero on success, negative on failure
int cvmx_mgmt_port_link_set(int port, cvmx_helper_link_info_t link_info)
    cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;

    /* Disable GMX before we make any changes. */
    agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
    agl_gmx_prtx.s.en = 0;
    agl_gmx_prtx.s.tx_en = 0;
    agl_gmx_prtx.s.rx_en = 0;
    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
        uint64_t one_second = cvmx_clock_get_rate(CVMX_CLOCK_CORE);
        /* Wait for GMX to be idle */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port), cvmx_agl_gmx_prtx_cfg_t, rx_idle, ==, 1, one_second)
            || CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port), cvmx_agl_gmx_prtx_cfg_t, tx_idle, ==, 1, one_second))
            cvmx_dprintf("MIX%d: Timeout waiting for GMX to be idle\n", port);

    /* Re-read the config after the disable/idle wait */
    agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));

    /* Set duplex mode */
    if (!link_info.s.link_up)
        agl_gmx_prtx.s.duplex = 1; /* Force full duplex on down links */
        agl_gmx_prtx.s.duplex = link_info.s.full_duplex;

    /* Program speed-dependent fields (speed/speed_msb/slottime/burst)
       per negotiated link speed */
    switch(link_info.s.speed)
        agl_gmx_prtx.s.speed = 0;
        agl_gmx_prtx.s.slottime = 0;
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
            agl_gmx_prtx.s.speed_msb = 1;
            agl_gmx_prtx.s.burst = 1;
        agl_gmx_prtx.s.speed = 0;
        agl_gmx_prtx.s.slottime = 0;
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
            agl_gmx_prtx.s.speed_msb = 0;
            agl_gmx_prtx.s.burst = 1;
        /* 1000 MBits is only supported on 6XXX chips */
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
            agl_gmx_prtx.s.speed_msb = 0;
            agl_gmx_prtx.s.speed = 1;
            agl_gmx_prtx.s.slottime = 1; /* Only matters for half-duplex */
            agl_gmx_prtx.s.burst = agl_gmx_prtx.s.duplex;

    /* Write the new GMX setting with the port still disabled. */
    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

    /* Read GMX CFG again to make sure the config is completed. */
    agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));

    /* On CN6XXX, scale the TX clock divider for the negotiated RGMII speed */
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
        cvmx_mgmt_port_state_t *state = cvmx_mgmt_port_state_ptr + port;
        cvmx_agl_gmx_txx_clk_t agl_clk;
        agl_clk.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_CLK(port));
        agl_clk.s.clk_cnt = 1; /* MII (both speeds) and RGMII 1000 setting */
        if (state->mode == CVMX_MGMT_PORT_RGMII_MODE)
            if (link_info.s.speed == 10)
                agl_clk.s.clk_cnt = 50;
            else if (link_info.s.speed == 100)
                agl_clk.s.clk_cnt = 5;
        cvmx_write_csr(CVMX_AGL_GMX_TXX_CLK(port), agl_clk.u64);

    /* Enable transmit and receive ports */
    agl_gmx_prtx.s.tx_en = 1;
    agl_gmx_prtx.s.rx_en = 1;
    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

    /* Enable the link. */
    agl_gmx_prtx.s.en = 1;
    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);