1 /***********************license start***************
2 * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
24 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
25 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
26 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
27 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
28 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
29 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
30 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
31 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
32 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
35 * For any questions regarding licensing please contact marketing@caviumnetworks.com
37 ***********************license end**************************************/
47 * Support functions for managing the MII management port
49 * <hr>$Revision: 42151 $<hr>
52 #include "cvmx-bootmem.h"
53 #include "cvmx-spinlock.h"
54 #include "cvmx-mdio.h"
55 #include "cvmx-mgmt-port.h"
56 #include "cvmx-sysinfo.h"
59 * Format of the TX/RX ring buffer entries
/* 64-bit hardware descriptor shared by the TX and RX rings. The rest of
   this file accesses it both as a raw 64-bit word (.u64) and through the
   bit fields below (.s.*), so this is presumably the struct half of a
   union whose opening declaration is outside this excerpt — confirm. */
66 uint64_t reserved_62_63 : 2;
67 uint64_t len : 14; /* Length of the buffer/packet in bytes */
68 uint64_t code : 8; /* The RX error code (completion code written by HW) */
69 uint64_t addr : 40; /* Physical address of the buffer */
71 } cvmx_mgmt_port_ring_entry_t;
74 * Per port state required for each mgmt port
/* One instance per management port. The whole array is carved out of a
   named bootmem block (see cvmx_mgmt_port_initialize()) so multiple
   applications can share the same state. Most accessors take 'lock'
   before touching the ring indices or buffers. */
78 cvmx_spinlock_t lock; /* Used for exclusive access to this structure */
79 int tx_write_index; /* Where the next TX will write in the tx_ring and tx_buffers */
80 int rx_read_index; /* Where the next RX will be in the rx_ring and rx_buffers */
81 int phy_id; /* The SMI/MDIO PHY address */
82 uint64_t mac; /* Our MAC address */
83 cvmx_mgmt_port_ring_entry_t tx_ring[CVMX_MGMT_PORT_NUM_TX_BUFFERS];
84 cvmx_mgmt_port_ring_entry_t rx_ring[CVMX_MGMT_PORT_NUM_RX_BUFFERS];
85 char tx_buffers[CVMX_MGMT_PORT_NUM_TX_BUFFERS][CVMX_MGMT_PORT_TX_BUFFER_SIZE];
86 char rx_buffers[CVMX_MGMT_PORT_NUM_RX_BUFFERS][CVMX_MGMT_PORT_RX_BUFFER_SIZE];
87 } cvmx_mgmt_port_state_t;
90 * Pointers to each mgmt port's state
/* Base of the per-port state array; NULL until cvmx_mgmt_port_initialize()
   allocates or attaches to the shared named bootmem block. */
92 CVMX_SHARED cvmx_mgmt_port_state_t *cvmx_mgmt_port_state_ptr = NULL;
96 * Return the number of management ports supported by this chip
98 * @return Number of ports
100 int __cvmx_mgmt_port_num_ports(void)
/* NOTE(review): the per-model return statements were not captured in this
   excerpt; only the CN56XX and CN52XX model checks are visible. Confirm
   the returned counts (and the default for other models) in the full file. */
102 if (OCTEON_IS_MODEL(OCTEON_CN56XX))
104 else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
112 * Called to initialize a management port for use. Multiple calls
113 * to this function across applications is safe.
115 * @param port Port to initialize
117 * @return CVMX_MGMT_PORT_SUCCESS or an error code
119 cvmx_mgmt_port_result_t cvmx_mgmt_port_initialize(int port)
121 char *alloc_name = "cvmx_mgmt_port";
122 cvmx_mixx_oring1_t oring1;
123 cvmx_mixx_ctl_t mix_ctl;
125 if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
126 return CVMX_MGMT_PORT_INVALID_PARAM;
/* First caller allocates the shared state as a named bootmem block and
   zeroes it; later callers (or other applications) attach to the existing
   block by name instead. */
128 cvmx_mgmt_port_state_ptr = cvmx_bootmem_alloc_named(CVMX_MGMT_PORT_NUM_PORTS * sizeof(cvmx_mgmt_port_state_t), 128, alloc_name);
129 if (cvmx_mgmt_port_state_ptr)
131 memset(cvmx_mgmt_port_state_ptr, 0, CVMX_MGMT_PORT_NUM_PORTS * sizeof(cvmx_mgmt_port_state_t));
135 cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block(alloc_name);
137 cvmx_mgmt_port_state_ptr = cvmx_phys_to_ptr(block_desc->base_addr);
140 cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Unable to get named block %s.\n", alloc_name);
141 return CVMX_MGMT_PORT_NO_MEMORY;
145 /* Reset the MIX block if the previous user had a different TX ring size, or if
146 ** we allocated a new (and blank) state structure. */
147 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
148 if (!mix_ctl.s.reset)
150 oring1.u64 = cvmx_read_csr(CVMX_MIXX_ORING1(port));
151 if (oring1.s.osize != CVMX_MGMT_PORT_NUM_TX_BUFFERS || cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)
/* NOTE(review): the write below presumably sets mix_ctl.s.reset before the
   CSR write (the assignment line is not visible here) — the loop then
   polls CTL until the reset completes (busy clears). Confirm in full file. */
153 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
155 cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
158 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
159 } while (mix_ctl.s.busy);
161 cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
162 cvmx_read_csr(CVMX_MIXX_CTL(port));
163 memset(cvmx_mgmt_port_state_ptr + port, 0, sizeof(cvmx_mgmt_port_state_t));
/* A zero first TX ring entry marks a port whose hardware has never been
   set up by this driver; perform the one-time HW initialization. */
168 if (cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)
170 cvmx_mgmt_port_state_t *state = cvmx_mgmt_port_state_ptr + port;
172 cvmx_mixx_bist_t mix_bist;
173 cvmx_agl_gmx_bist_t agl_gmx_bist;
174 cvmx_mixx_oring1_t oring1;
175 cvmx_mixx_iring1_t iring1;
176 cvmx_mixx_ctl_t mix_ctl;
178 /* Make sure BIST passed */
179 mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(port));
181 cvmx_dprintf("WARNING: cvmx_mgmt_port_initialize: Managment port MIX failed BIST (0x%016llx)\n", CAST64(mix_bist.u64));
183 agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
184 if (agl_gmx_bist.u64)
185 cvmx_dprintf("WARNING: cvmx_mgmt_port_initialize: Managment port AGL failed BIST (0x%016llx)\n", CAST64(agl_gmx_bist.u64));
187 /* Clear all state information */
188 memset(state, 0, sizeof(*state));
190 /* Take the control logic out of reset */
191 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
193 cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
195 /* Set the PHY address */
196 if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
199 state->phy_id = port; /* Will need to be changed to match the board */
201 /* Create a default MAC address */
202 state->mac = 0x000000dead000000ull;
/* Mix in the low 24 bits of the state address so each port/instance gets
   a distinct (locally unique, not globally registered) default MAC. */
203 state->mac += 0xffffff & CAST64(state);
205 /* Setup the TX ring */
206 for (i=0; i<CVMX_MGMT_PORT_NUM_TX_BUFFERS; i++)
208 state->tx_ring[i].s.len = CVMX_MGMT_PORT_TX_BUFFER_SIZE;
209 state->tx_ring[i].s.addr = cvmx_ptr_to_phys(state->tx_buffers[i]);
212 /* Tell the HW where the TX ring is (obase is a physical address >> 3) */
214 oring1.s.obase = cvmx_ptr_to_phys(state->tx_ring)>>3;
215 oring1.s.osize = CVMX_MGMT_PORT_NUM_TX_BUFFERS;
217 cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);
219 /* Setup the RX ring */
220 for (i=0; i<CVMX_MGMT_PORT_NUM_RX_BUFFERS; i++)
222 /* This size is -8 due to an errata for CN56XX pass 1 */
223 state->rx_ring[i].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
224 state->rx_ring[i].s.addr = cvmx_ptr_to_phys(state->rx_buffers[i]);
227 /* Tell the HW where the RX ring is */
229 iring1.s.ibase = cvmx_ptr_to_phys(state->rx_ring)>>3;
230 iring1.s.isize = CVMX_MGMT_PORT_NUM_RX_BUFFERS;
232 cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
233 cvmx_write_csr(CVMX_MIXX_IRING2(port), CVMX_MGMT_PORT_NUM_RX_BUFFERS);
235 /* Disable the external input/output */
236 cvmx_mgmt_port_disable(port);
238 /* Set the MAC address filtering up */
239 cvmx_mgmt_port_set_mac(port, state->mac);
241 /* Set the default max size to an MTU of 1500 with L2 and VLAN */
242 cvmx_mgmt_port_set_max_packet_size(port, 1518);
244 /* Enable the port HW. Packets are not allowed until cvmx_mgmt_port_enable() is called */
246 mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
247 mix_ctl.s.en = 1; /* Enable the port */
248 mix_ctl.s.nbtarb = 0; /* Arbitration mode */
249 mix_ctl.s.mrq_hwm = 1; /* MII CB-request FIFO programmable high watermark */
250 cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
/* Pass-1 silicon workaround: bypass the automatic drive-strength
   compensation with forced values. */
252 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
254 /* Force compensation values, as they are not determined properly by HW */
255 cvmx_agl_gmx_drv_ctl_t drv_ctl;
257 drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
260 drv_ctl.s.byp_en1 = 1;
266 drv_ctl.s.byp_en = 1;
270 cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
273 return CVMX_MGMT_PORT_SUCCESS;
278 * Shutdown a management port. This currently disables packet IO
279 * but leaves all hardware and buffers. Another application can then
280 * call initialize() without redoing the hardware setup.
282 * @param port Management port
284 * @return CVMX_MGMT_PORT_SUCCESS or an error code
286 cvmx_mgmt_port_result_t cvmx_mgmt_port_shutdown(int port)
288 if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
289 return CVMX_MGMT_PORT_INVALID_PARAM;
291 /* Stop packets from coming in */
292 cvmx_mgmt_port_disable(port);
294 /* We don't free any memory so the next initialize can reuse the HW setup */
295 return CVMX_MGMT_PORT_SUCCESS;
300 * Enable packet IO on a management port
302 * @param port Management port
304 * @return CVMX_MGMT_PORT_SUCCESS or an error code
306 cvmx_mgmt_port_result_t cvmx_mgmt_port_enable(int port)
308 cvmx_mgmt_port_state_t *state;
309 cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;
310 cvmx_agl_gmx_inf_mode_t agl_gmx_inf_mode;
311 cvmx_agl_gmx_rxx_frm_ctl_t rxx_frm_ctl;
313 if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
314 return CVMX_MGMT_PORT_INVALID_PARAM;
316 state = cvmx_mgmt_port_state_ptr + port;
318 cvmx_spinlock_lock(&state->lock);
/* Program frame-control: preamble handling, pause-frame handling and
   relaxed length checks for padded/VLAN frames. */
321 rxx_frm_ctl.s.pre_align = 1;
322 rxx_frm_ctl.s.pad_len = 1; /* When set, disables the length check for non-min sized pkts with padding in the client data */
323 rxx_frm_ctl.s.vlan_len = 1; /* When set, disables the length check for VLAN pkts */
324 rxx_frm_ctl.s.pre_free = 1; /* When set, PREAMBLE checking is less strict */
325 rxx_frm_ctl.s.ctl_smac = 0; /* Control Pause Frames can match station SMAC */
326 rxx_frm_ctl.s.ctl_mcst = 1; /* Control Pause Frames can match globally assign Multicast address */
327 rxx_frm_ctl.s.ctl_bck = 1; /* Forward pause information to TX block */
328 rxx_frm_ctl.s.ctl_drp = 1; /* Drop Control Pause Frames */
329 rxx_frm_ctl.s.pre_strp = 1; /* Strip off the preamble */
330 rxx_frm_ctl.s.pre_chk = 1; /* This port is configured to send PREAMBLE+SFD to begin every frame. GMX checks that the PREAMBLE is sent correctly */
331 cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
333 /* Enable the AGL block */
334 agl_gmx_inf_mode.u64 = 0;
335 agl_gmx_inf_mode.s.en = 1;
336 cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
338 /* Configure the port duplex and enables */
339 agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
340 agl_gmx_prtx.s.tx_en = 1;
341 agl_gmx_prtx.s.rx_en = 1;
/* cvmx_mgmt_port_get_link() returns negative values for half duplex */
342 if (cvmx_mgmt_port_get_link(port) < 0)
343 agl_gmx_prtx.s.duplex = 0;
345 agl_gmx_prtx.s.duplex = 1;
346 agl_gmx_prtx.s.en = 1;
347 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
349 cvmx_spinlock_unlock(&state->lock);
350 return CVMX_MGMT_PORT_SUCCESS;
355 * Disable packet IO on a management port
357 * @param port Management port
359 * @return CVMX_MGMT_PORT_SUCCESS or an error code
361 cvmx_mgmt_port_result_t cvmx_mgmt_port_disable(int port)
363 cvmx_mgmt_port_state_t *state;
364 cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;
366 if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
367 return CVMX_MGMT_PORT_INVALID_PARAM;
369 state = cvmx_mgmt_port_state_ptr + port;
371 cvmx_spinlock_lock(&state->lock);
/* Clear only the port-enable bit; all other PRTX_CFG settings are kept
   so a later cvmx_mgmt_port_enable() restores service. */
373 agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
374 agl_gmx_prtx.s.en = 0;
375 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
377 cvmx_spinlock_unlock(&state->lock);
378 return CVMX_MGMT_PORT_SUCCESS;
383 * Send a packet out the management port. The packet is copied so
384 * the input buffer isn't used after this call.
386 * @param port Management port
387 * @param packet_len Length of the packet to send. It does not include the final CRC
388 * @param buffer Packet data
390 * @return CVMX_MGMT_PORT_SUCCESS or an error code
392 cvmx_mgmt_port_result_t cvmx_mgmt_port_send(int port, int packet_len, void *buffer)
394 cvmx_mgmt_port_state_t *state;
395 cvmx_mixx_oring2_t mix_oring2;
397 if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
398 return CVMX_MGMT_PORT_INVALID_PARAM;
400 /* Make sure the packet size is valid */
401 if ((packet_len < 1) || (packet_len > CVMX_MGMT_PORT_TX_BUFFER_SIZE))
402 return CVMX_MGMT_PORT_INVALID_PARAM;
405 return CVMX_MGMT_PORT_INVALID_PARAM;
407 state = cvmx_mgmt_port_state_ptr + port;
409 cvmx_spinlock_lock(&state->lock);
/* The outstanding doorbell count tells us how many TX descriptors the
   hardware still owns; leave one slot free so the ring never wraps onto
   an in-flight buffer. */
411 mix_oring2.u64 = cvmx_read_csr(CVMX_MIXX_ORING2(port));
412 if (mix_oring2.s.odbell >= CVMX_MGMT_PORT_NUM_TX_BUFFERS - 1)
414 /* No room for another packet */
415 cvmx_spinlock_unlock(&state->lock);
416 return CVMX_MGMT_PORT_NO_MEMORY;
420 /* Copy the packet into the output buffer */
421 memcpy(state->tx_buffers[state->tx_write_index], buffer, packet_len);
/* Overwrite bytes 6..11 (the Ethernet source MAC field) with the low
   6 bytes of our stored MAC. */
422 /* Insert the source MAC */
423 memcpy(state->tx_buffers[state->tx_write_index] + 6, ((char*)&state->mac) + 2, 6);
424 /* Update the TX ring buffer entry size */
425 state->tx_ring[state->tx_write_index].s.len = packet_len;
426 /* Increment our TX index */
427 state->tx_write_index = (state->tx_write_index + 1) % CVMX_MGMT_PORT_NUM_TX_BUFFERS;
428 /* Ring the doorbell, sending the packet */
430 cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
/* Opportunistically acknowledge any completed TX descriptors (ORCNT is
   write-to-decrement, so writing back the read value clears it). */
431 if (cvmx_read_csr(CVMX_MIXX_ORCNT(port)))
432 cvmx_write_csr(CVMX_MIXX_ORCNT(port), cvmx_read_csr(CVMX_MIXX_ORCNT(port)));
434 cvmx_spinlock_unlock(&state->lock);
435 return CVMX_MGMT_PORT_SUCCESS;
441 * Receive a packet from the management port.
443 * @param port Management port
444 * @param buffer_len Size of the buffer to receive the packet into
445 * @param buffer Buffer to receive the packet into
447 * @return The size of the packet, or a negative error code on failure. Zero
448 * means that no packets were available.
450 int cvmx_mgmt_port_receive(int port, int buffer_len, void *buffer)
452 cvmx_mixx_ircnt_t mix_ircnt;
453 cvmx_mgmt_port_state_t *state;
456 if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
457 return CVMX_MGMT_PORT_INVALID_PARAM;
459 /* Make sure the buffer size is valid */
461 return CVMX_MGMT_PORT_INVALID_PARAM;
464 return CVMX_MGMT_PORT_INVALID_PARAM;
466 state = cvmx_mgmt_port_state_ptr + port;
468 cvmx_spinlock_lock(&state->lock);
470 /* Find out how many RX packets are pending */
471 mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
472 if (mix_ircnt.s.ircnt)
474 void *source = state->rx_buffers[state->rx_read_index];
475 uint64_t *zero_check = source;
476 /* CN56XX pass 1 has an errata where packets might start 8 bytes
477 into the buffer instead of at their correct location. If the
478 first 8 bytes are zero we assume this has happened */
479 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && (*zero_check == 0))
481 /* Start off with zero bytes received */
483 /* While the completion code signals more data (code 16), copy the buffers
484 into the user's data; the final fragment carries code 15 */
485 while (state->rx_ring[state->rx_read_index].s.code == 16)
487 /* Only copy what will fit in the user's buffer */
488 int length = state->rx_ring[state->rx_read_index].s.len;
489 if (length > buffer_len)
491 memcpy(buffer, source, length);
492 /* Reduce the size of the buffer to the remaining space. If we run
493 out we will signal an error when the code 15 buffer doesn't fit */
495 buffer_len -= length;
497 /* Update this buffer for reuse in future receives. This size is
498 -8 due to an errata for CN56XX pass 1 */
499 state->rx_ring[state->rx_read_index].s.code = 0;
500 state->rx_ring[state->rx_read_index].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
501 state->rx_read_index = (state->rx_read_index + 1) % CVMX_MGMT_PORT_NUM_RX_BUFFERS;
502 /* Zero the beginning of the buffer for use by the errata check */
505 /* Return this RX descriptor to the hardware */
506 cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
507 source = state->rx_buffers[state->rx_read_index];
511 /* Check for the final good completion code */
512 if (state->rx_ring[state->rx_read_index].s.code == 15)
514 if (buffer_len >= state->rx_ring[state->rx_read_index].s.len)
516 int length = state->rx_ring[state->rx_read_index].s.len;
517 memcpy(buffer, source, length);
522 /* Not enough room for the packet */
523 cvmx_dprintf("ERROR: cvmx_mgmt_port_receive: Packet (%d) larger than supplied buffer (%d)\n", state->rx_ring[state->rx_read_index].s.len, buffer_len);
524 result = CVMX_MGMT_PORT_NO_MEMORY;
/* Any code other than 15 here is a receive error; return it negated. */
529 cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;
530 cvmx_dprintf("ERROR: cvmx_mgmt_port_receive: Receive error code %d. Packet dropped(Len %d), \n",
531 state->rx_ring[state->rx_read_index].s.code, state->rx_ring[state->rx_read_index].s.len + result);
532 result = -state->rx_ring[state->rx_read_index].s.code;
535 /* Check to see if we need to change the duplex. */
536 agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
537 if (cvmx_mgmt_port_get_link(port) < 0)
538 agl_gmx_prtx.s.duplex = 0;
540 agl_gmx_prtx.s.duplex = 1;
541 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
544 /* Clean out the ring buffer entry. This size is -8 due to an errata
546 state->rx_ring[state->rx_read_index].s.code = 0;
547 state->rx_ring[state->rx_read_index].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
548 state->rx_read_index = (state->rx_read_index + 1) % CVMX_MGMT_PORT_NUM_RX_BUFFERS;
549 /* Zero the beginning of the buffer for use by the errata check */
552 /* Return this RX descriptor to the hardware */
553 cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
554 /* Decrement the pending RX count */
555 cvmx_write_csr(CVMX_MIXX_IRCNT(port), 1);
559 /* No packets available */
562 cvmx_spinlock_unlock(&state->lock);
568 * Get the management port link status:
569 * 100 = 100Mbps, full duplex
570 * 10 = 10Mbps, full duplex
572 * -10 = 10Mbps, half duplex
573 * -100 = 100Mbps, half duplex
575 * @param port Management port
579 int cvmx_mgmt_port_get_link(int port)
581 cvmx_mgmt_port_state_t *state;
584 if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
585 return CVMX_MGMT_PORT_INVALID_PARAM;
587 state = cvmx_mgmt_port_state_ptr + port;
589 /* Assume 100Mbps if we don't know the PHY address */
590 if (state->phy_id == -1)
/* phy_id encodes the MDIO bus number in the upper byte and the PHY
   address in the lower byte. */
594 /* read BCM phy MDIO aux status summary register */
595 phy_status = cvmx_mdio_read(state->phy_id >> 8, state->phy_id & 0xff,
597 /* check the link status first (bit 15 = link up) */
598 if ((phy_status & 0x8000) == 0)
/* Bits 10:8 encode the negotiated speed/duplex; NOTE(review): the case
   arms of this switch were not captured in this excerpt. */
601 switch ((phy_status >> 8) & 0x7)
628 /* something's amiss if we get here... */
634 * Set the MAC address for a management port
636 * @param port Management port
637 * @param mac New MAC address. The lower 6 bytes are used.
639 * @return CVMX_MGMT_PORT_SUCCESS or an error code
641 cvmx_mgmt_port_result_t cvmx_mgmt_port_set_mac(int port, uint64_t mac)
643 cvmx_mgmt_port_state_t *state;
644 cvmx_agl_gmx_rxx_adr_ctl_t agl_gmx_rxx_adr_ctl;
646 if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
647 return CVMX_MGMT_PORT_INVALID_PARAM;
649 state = cvmx_mgmt_port_state_ptr + port;
651 cvmx_spinlock_lock(&state->lock);
653 agl_gmx_rxx_adr_ctl.u64 = 0;
654 agl_gmx_rxx_adr_ctl.s.cam_mode = 1; /* Only accept matching MAC addresses */
655 agl_gmx_rxx_adr_ctl.s.mcst = 0; /* Drop multicast */
656 agl_gmx_rxx_adr_ctl.s.bcst = 1; /* Allow broadcast */
657 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), agl_gmx_rxx_adr_ctl.u64);
659 /* Only using one of the CAMs; program one byte of the MAC per CAM
   register, most significant byte first */
660 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), (mac >> 40) & 0xff);
661 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), (mac >> 32) & 0xff);
662 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), (mac >> 24) & 0xff);
663 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), (mac >> 16) & 0xff);
664 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), (mac >> 8) & 0xff);
665 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), (mac >> 0) & 0xff);
666 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 1);
/* NOTE(review): the store of 'mac' into state->mac (used by send() and
   get_mac()) is not visible in this excerpt — confirm it exists here. */
669 cvmx_spinlock_unlock(&state->lock);
670 return CVMX_MGMT_PORT_SUCCESS;
675 * Get the MAC address for a management port
677 * @param port Management port
679 * @return MAC address
681 uint64_t cvmx_mgmt_port_get_mac(int port)
/* NOTE(review): an invalid port returns the CVMX_MGMT_PORT_INVALID_PARAM
   enum value cast to uint64_t — callers cannot distinguish that sentinel
   from a real MAC without range-checking the port themselves. */
683 if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
684 return CVMX_MGMT_PORT_INVALID_PARAM;
686 return cvmx_mgmt_port_state_ptr[port].mac;
690 * Set the multicast list.
692 * @param port Management port
693 * @param flags Interface flags (CVMX_IFF_ALLMULTI / CVMX_IFF_PROMISC)
697 void cvmx_mgmt_port_set_multicast_list(int port, int flags)
699 cvmx_mgmt_port_state_t *state;
700 cvmx_agl_gmx_rxx_adr_ctl_t agl_gmx_rxx_adr_ctl;
702 if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
705 state = cvmx_mgmt_port_state_ptr + port;
707 cvmx_spinlock_lock(&state->lock);
709 agl_gmx_rxx_adr_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port));
711 /* Allow broadcast MAC addresses */
712 if (!agl_gmx_rxx_adr_ctl.s.bcst)
713 agl_gmx_rxx_adr_ctl.s.bcst = 1;
715 if ((flags & CVMX_IFF_ALLMULTI) || (flags & CVMX_IFF_PROMISC))
716 agl_gmx_rxx_adr_ctl.s.mcst = 2; /* Force accept multicast packets */
718 agl_gmx_rxx_adr_ctl.s.mcst = 1; /* Force reject multicast packets */
720 if (flags & CVMX_IFF_PROMISC)
721 agl_gmx_rxx_adr_ctl.s.cam_mode = 0; /* Reject matches if promisc. Since CAM is shut off, should accept everything */
723 agl_gmx_rxx_adr_ctl.s.cam_mode = 1; /* Filter packets based on the CAM */
725 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), agl_gmx_rxx_adr_ctl.u64);
/* Disable the CAM entirely in promiscuous mode, re-enable it otherwise */
727 if (flags & CVMX_IFF_PROMISC)
728 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 0);
730 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 1);
732 cvmx_spinlock_unlock(&state->lock);
737 * Set the maximum packet allowed in. Size is specified
738 * including L2 but without FCS. A normal MTU would correspond
739 * to 1514 assuming the standard 14 byte L2 header.
741 * @param port Management port
742 * @param size_without_fcs
743 * Size in bytes without FCS
745 void cvmx_mgmt_port_set_max_packet_size(int port, int size_without_fcs)
747 cvmx_mgmt_port_state_t *state;
749 if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
752 state = cvmx_mgmt_port_state_ptr + port;
754 cvmx_spinlock_lock(&state->lock);
755 cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
/* Jabber threshold must be a multiple of 8; round the size up */
756 cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), (size_without_fcs+7) & 0xfff8);
757 cvmx_spinlock_unlock(&state->lock);