1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
49 * Helper functions for common, but complicated tasks.
51 * <hr>$Revision: 70030 $<hr>
53 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
54 #include <linux/module.h>
55 #include <asm/octeon/cvmx.h>
56 #include <asm/octeon/cvmx-config.h>
57 #include <asm/octeon/cvmx-bootmem.h>
58 #include <asm/octeon/cvmx-sriox-defs.h>
59 #include <asm/octeon/cvmx-npi-defs.h>
60 #include <asm/octeon/cvmx-mio-defs.h>
61 #include <asm/octeon/cvmx-pexp-defs.h>
62 #include <asm/octeon/cvmx-pip-defs.h>
63 #include <asm/octeon/cvmx-asxx-defs.h>
64 #include <asm/octeon/cvmx-gmxx-defs.h>
65 #include <asm/octeon/cvmx-smix-defs.h>
66 #include <asm/octeon/cvmx-dbg-defs.h>
67 #include <asm/octeon/cvmx-sso-defs.h>
69 #include <asm/octeon/cvmx-gmx.h>
70 #include <asm/octeon/cvmx-fpa.h>
71 #include <asm/octeon/cvmx-pip.h>
72 #include <asm/octeon/cvmx-pko.h>
73 #include <asm/octeon/cvmx-ipd.h>
74 #include <asm/octeon/cvmx-spi.h>
75 #include <asm/octeon/cvmx-clock.h>
76 #include <asm/octeon/cvmx-helper.h>
77 #include <asm/octeon/cvmx-helper-board.h>
78 #include <asm/octeon/cvmx-helper-errata.h>
79 #include <asm/octeon/cvmx-helper-cfg.h>
81 #if !defined(__FreeBSD__) || !defined(_KERNEL)
82 #include "executive-config.h"
85 #include "cvmx-sysinfo.h"
86 #include "cvmx-bootmem.h"
87 #include "cvmx-version.h"
88 #include "cvmx-helper-check-defines.h"
90 #if !defined(__FreeBSD__) || !defined(_KERNEL)
91 #include "cvmx-error.h"
92 #include "cvmx-config.h"
100 #include "cvmx-helper.h"
101 #include "cvmx-helper-board.h"
102 #include "cvmx-helper-errata.h"
103 #include "cvmx-helper-cfg.h"
/* NOTE(review): this file is a truncated numbered listing — the leading
 * integer on each line is the original source line number; gaps in those
 * numbers mean lines are missing. Comments only are added here.
 *
 * User-overridable hook: if set non-NULL before cvmx-helper setup runs,
 * it is called from the PKO setup path (see __cvmx_helper_interface_setup_pko
 * below) to customize the 16-entry per-port queue priority vector. */
107 #ifdef CVMX_ENABLE_PKO_FUNCTIONS
110 * cvmx_override_pko_queue_priority(int pko_port, uint64_t
111 * priorities[16]) is a function pointer. It is meant to allow
112 * customization of the PKO queue priorities based on the port
113 * number. Users should set this pointer to a function before
114 * calling any cvmx-helper operations.
116 CVMX_SHARED void (*cvmx_override_pko_queue_priority)(int ipd_port,
117 uint64_t *priorities) = NULL;
118 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
119 EXPORT_SYMBOL(cvmx_override_pko_queue_priority);
/* User-overridable hook: if set non-NULL, called from
 * __cvmx_helper_port_setup_ipd (below) after the default per-port IPD/PIP
 * configuration is written, but before IPD is enabled. */
123 * cvmx_override_ipd_port_setup(int ipd_port) is a function
124 * pointer. It is meant to allow customization of the IPD
125 * port/port kind setup before packet input/output comes online.
126 * It is called after cvmx-helper does the default IPD configuration,
127 * but before IPD is enabled. Users should set this pointer to a
128 * function before calling any cvmx-helper operations.
130 CVMX_SHARED void (*cvmx_override_ipd_port_setup)(int ipd_port) = NULL;
/* Returns the chip-wide interface count, with board-specific overrides
 * checked first (Lanner/Radisys custom boards), then per-model defaults.
 * NOTE(review): the actual `return <count>;` lines for each case fall on
 * source lines elided from this listing (e.g. 145, 147-148, 151-156, 158,
 * 161-163, 165, 167-170) — the counts cannot be confirmed from here. */
133 * Return the number of interfaces the chip has. Each interface
134 * may have multiple ports. Most chips support two interfaces,
135 * but the CNX0XX and CNX1XX are exceptions. These only support
138 * @return Number of interfaces on chip
140 int cvmx_helper_get_number_of_interfaces(void)
142 switch (cvmx_sysinfo_get()->board_type) {
143 #if defined(OCTEON_VENDOR_LANNER)
144 case CVMX_BOARD_TYPE_CUST_LANNER_MR955:
146 case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
149 #if defined(OCTEON_VENDOR_RADISYS)
150 case CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE:
157 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
159 else if (OCTEON_IS_MODEL(OCTEON_CN66XX))
160 if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
164 else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
166 else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN61XX))
171 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
172 EXPORT_SYMBOL(cvmx_helper_get_number_of_interfaces);
/* Port count for one interface. On PKND-capable chips (CN68XX family) the
 * count comes from live hardware enumeration; otherwise from the cached
 * IPD port count recorded at probe time. */
177 * Return the number of ports on an interface. Depending on the
178 * chip and configuration, this can be 1-16. A value of 0
179 * specifies that the interface doesn't exist or isn't usable.
181 * @param interface Interface to get the port count for
183 * @return Number of ports on interface. Can be Zero.
185 int cvmx_helper_ports_on_interface(int interface)
187 if (octeon_has_feature(OCTEON_FEATURE_PKND))
188 return cvmx_helper_interface_enumerate(interface);
190 return __cvmx_helper_get_num_ipd_ports(interface);
192 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
193 EXPORT_SYMBOL(cvmx_helper_ports_on_interface);
/* Decode an interface's operating mode (SGMII/XAUI/RXAUI/ILK/NPI/LOOP/
 * SRIO/SPI/RGMII/GMII/PICMG or DISABLED) from QLM configuration registers
 * on newer chips, or GMXX_INF_MODE on older ones.
 * NOTE(review): listing is truncated — the `if (interface == N)` guards
 * that select between the CN68XX QLM branches (and several closing braces)
 * fall on elided lines; branch boundaries below are inferred, verify
 * against the full source. */
198 * Get the operating mode of an interface. Depending on the Octeon
199 * chip and configuration, this function returns an enumeration
200 * of the type of packet I/O supported by an interface.
202 * @param interface Interface to probe
204 * @return Mode of the interface. Unknown or unsupported interfaces return
207 cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
209 cvmx_gmxx_inf_mode_t mode;
/* --- CN68XX: mode is derived from MIO_QLMX_CFG qlm_cfg/qlm_spd fields --- */
211 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
213 cvmx_mio_qlmx_cfg_t qlm_cfg;
217 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
218 /* QLM is disabled when QLM SPD is 15. */
219 if (qlm_cfg.s.qlm_spd == 15)
220 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
222 if (qlm_cfg.s.qlm_cfg == 7)
223 return CVMX_HELPER_INTERFACE_MODE_RXAUI;
224 else if (qlm_cfg.s.qlm_cfg == 2)
225 return CVMX_HELPER_INTERFACE_MODE_SGMII;
226 else if (qlm_cfg.s.qlm_cfg == 3)
227 return CVMX_HELPER_INTERFACE_MODE_XAUI;
229 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
232 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
233 /* QLM is disabled when QLM SPD is 15. */
234 if (qlm_cfg.s.qlm_spd == 15)
235 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
237 if (qlm_cfg.s.qlm_cfg == 7)
238 return CVMX_HELPER_INTERFACE_MODE_RXAUI;
240 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
245 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(interface));
246 /* QLM is disabled when QLM SPD is 15. */
247 if (qlm_cfg.s.qlm_spd == 15)
248 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
250 if (qlm_cfg.s.qlm_cfg == 2)
251 return CVMX_HELPER_INTERFACE_MODE_SGMII;
252 else if (qlm_cfg.s.qlm_cfg == 3)
253 return CVMX_HELPER_INTERFACE_MODE_XAUI;
255 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
/* ILK interfaces live at index >= 4 on CN68XX; QLM index is interface-4 */
259 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(interface - 4));
260 /* QLM is disabled when QLM SPD is 15. */
261 if (qlm_cfg.s.qlm_spd == 15)
262 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
264 if (qlm_cfg.s.qlm_cfg == 1)
266 return CVMX_HELPER_INTERFACE_MODE_ILK;
269 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
/* NPI is only usable when QLM3 (and QLM1) are not claimed by other modes */
272 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(3));
273 /* QLM is disabled when QLM SPD is 15. */
274 if (qlm_cfg.s.qlm_spd == 15)
275 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
276 else if (qlm_cfg.s.qlm_cfg != 0)
278 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
279 if (qlm_cfg.s.qlm_cfg != 0)
280 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
282 return CVMX_HELPER_INTERFACE_MODE_NPI;
285 return CVMX_HELPER_INTERFACE_MODE_LOOP;
288 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
294 return CVMX_HELPER_INTERFACE_MODE_NPI;
298 if (OCTEON_IS_MODEL(OCTEON_CN56XX)
299 || OCTEON_IS_MODEL(OCTEON_CN52XX)
300 || OCTEON_IS_MODEL(OCTEON_CN6XXX)
301 || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
302 return CVMX_HELPER_INTERFACE_MODE_LOOP;
304 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
/* --- SRIO detection: CN63XX (ifaces 4-5) and CN66XX (ifaces 4-7) --- */
307 /* Only present in CN63XX & CN66XX Octeon model */
308 if ((OCTEON_IS_MODEL(OCTEON_CN63XX) && (interface == 4 || interface == 5))
309 || (OCTEON_IS_MODEL(OCTEON_CN66XX) && interface >= 4 && interface <= 7))
311 cvmx_sriox_status_reg_t sriox_status_reg;
313 /* cn66xx pass1.0 has only 2 SRIO interfaces. */
314 if ((interface == 5 || interface == 7) && OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
315 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
317 sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(interface-4));
318 if (sriox_status_reg.s.srio)
319 return CVMX_HELPER_INTERFACE_MODE_SRIO;
321 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
324 /* Interface 5 always disabled in CN66XX */
325 if (OCTEON_IS_MODEL(OCTEON_CN66XX))
327 cvmx_mio_qlmx_cfg_t mio_qlm_cfg;
329 /* QLM2 is SGMII0 and QLM1 is SGMII1 */
331 mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
332 else if (interface == 1)
333 mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
335 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
337 if (mio_qlm_cfg.s.qlm_spd == 15)
338 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
/* CN66XX uses different qlm_cfg encodings (9=SGMII, 11=XAUI) than CN68XX */
340 if (mio_qlm_cfg.s.qlm_cfg == 9)
341 return CVMX_HELPER_INTERFACE_MODE_SGMII;
342 else if (mio_qlm_cfg.s.qlm_cfg == 11)
343 return CVMX_HELPER_INTERFACE_MODE_XAUI;
345 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
347 else if (OCTEON_IS_MODEL(OCTEON_CN61XX))
349 cvmx_mio_qlmx_cfg_t qlm_cfg;
353 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
354 if (qlm_cfg.s.qlm_cfg == 2)
355 return CVMX_HELPER_INTERFACE_MODE_SGMII;
356 else if (qlm_cfg.s.qlm_cfg == 3)
357 return CVMX_HELPER_INTERFACE_MODE_XAUI;
359 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
361 else if (interface == 1)
363 /* If QLM 1 is PEV0/PEM1 mode, them QLM0 cannot be SGMII/XAUI */
364 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
365 if (qlm_cfg.s.qlm_cfg == 1)
366 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
368 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
369 if (qlm_cfg.s.qlm_cfg == 2)
370 return CVMX_HELPER_INTERFACE_MODE_SGMII;
371 else if (qlm_cfg.s.qlm_cfg == 3)
372 return CVMX_HELPER_INTERFACE_MODE_XAUI;
374 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
/* --- Board quirk: CN3005 EVB rev 1 reports GMII despite RGMII wiring --- */
378 if (interface == 0 && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5 && cvmx_sysinfo_get()->board_rev_major == 1)
380 /* Lie about interface type of CN3005 board. This board has a switch on port 1 like
381 ** the other evaluation boards, but it is connected over RGMII instead of GMII. Report
382 ** GMII mode so that the speed is forced to 1 Gbit full duplex. Other than some initial configuration
383 ** (which does not use the output of this function) there is no difference in setup between GMII and RGMII modes.
385 return CVMX_HELPER_INTERFACE_MODE_GMII;
388 /* Interface 1 is always disabled on CN31XX and CN30XX */
390 && (OCTEON_IS_MODEL(OCTEON_CN31XX)
391 || OCTEON_IS_MODEL(OCTEON_CN30XX)
392 || OCTEON_IS_MODEL(OCTEON_CN50XX)
393 || OCTEON_IS_MODEL(OCTEON_CN52XX)
394 || OCTEON_IS_MODEL(OCTEON_CN63XX)
395 || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
396 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
/* --- Fallback: decode GMXX_INF_MODE for GMX-based interfaces --- */
398 mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
400 if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
402 switch(mode.cn56xx.mode)
404 case 0: return CVMX_HELPER_INTERFACE_MODE_DISABLED;
405 case 1: return CVMX_HELPER_INTERFACE_MODE_XAUI;
406 case 2: return CVMX_HELPER_INTERFACE_MODE_SGMII;
407 case 3: return CVMX_HELPER_INTERFACE_MODE_PICMG;
408 default:return CVMX_HELPER_INTERFACE_MODE_DISABLED;
411 else if (OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
413 switch(mode.cn63xx.mode)
415 case 0: return CVMX_HELPER_INTERFACE_MODE_SGMII;
416 case 1: return CVMX_HELPER_INTERFACE_MODE_XAUI;
417 default: return CVMX_HELPER_INTERFACE_MODE_DISABLED;
423 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
427 if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
428 return CVMX_HELPER_INTERFACE_MODE_SPI;
430 return CVMX_HELPER_INTERFACE_MODE_GMII;
433 return CVMX_HELPER_INTERFACE_MODE_RGMII;
436 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
437 EXPORT_SYMBOL(cvmx_helper_interface_get_mode);
/* Per-port IPD/PIP setup: reads PIP_PRT_CFGX/PIP_PRT_TAGX (indexed by port
 * kind on PKND chips, by ipd_port otherwise), sets QoS, skip mode, and the
 * WQE tag-generation flags, then writes them via cvmx_pip_config_port and
 * finally invokes the user override hook if registered. */
442 * Configure the IPD/PIP tagging and QoS options for a specific
443 * port. This function determines the POW work queue entry
444 * contents for a port. The setup performed here is controlled by
445 * the defines in executive-config.h.
447 * @param ipd_port Port/Port kind to configure. This follows the IPD numbering,
448 * not the per interface numbering
450 * @return Zero on success, negative on failure
452 static int __cvmx_helper_port_setup_ipd(int ipd_port)
454 cvmx_pip_prt_cfgx_t port_config;
455 cvmx_pip_prt_tagx_t tag_config;
457 if (octeon_has_feature(OCTEON_FEATURE_PKND))
459 int interface, index, pknd;
460 cvmx_pip_prt_cfgbx_t prt_cfgbx;
462 interface = cvmx_helper_get_interface_num(ipd_port);
463 index = cvmx_helper_get_interface_index_num(ipd_port);
464 pknd = cvmx_helper_get_pknd(interface, index);
466 port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(pknd));
467 tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(pknd));
/* QoS group is the low 3 bits of the pknd / ipd_port (8 POW queues) */
469 port_config.s.qos = pknd & 0x7;
471 /* Default BPID to use for packets on this port-kind */
472 prt_cfgbx.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGBX(pknd));
473 prt_cfgbx.s.bpid = pknd;
474 cvmx_write_csr(CVMX_PIP_PRT_CFGBX(pknd), prt_cfgbx.u64);
478 port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
479 tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));
481 /* Have each port go to a different POW queue */
482 port_config.s.qos = ipd_port & 0x7;
485 /* Process the headers and place the IP header in the work queue */
486 port_config.s.mode = CVMX_HELPER_INPUT_PORT_SKIP_MODE;
/* Which packet fields contribute to the POW tag (compile-time policy) */
488 tag_config.s.ip6_src_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP;
489 tag_config.s.ip6_dst_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_IP;
490 tag_config.s.ip6_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT;
491 tag_config.s.ip6_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT;
492 tag_config.s.ip6_nxth_flag = CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER;
493 tag_config.s.ip4_src_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP;
494 tag_config.s.ip4_dst_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_IP;
495 tag_config.s.ip4_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT;
496 tag_config.s.ip4_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT;
497 tag_config.s.ip4_pctl_flag = CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL;
498 tag_config.s.inc_prt_flag = CVMX_HELPER_INPUT_TAG_INPUT_PORT;
499 tag_config.s.tcp6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
500 tag_config.s.tcp4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
501 tag_config.s.ip6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
502 tag_config.s.ip4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
503 tag_config.s.non_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
504 /* Put all packets in group 0. Other groups can be used by the app */
505 tag_config.s.grp = 0;
507 cvmx_pip_config_port(ipd_port, port_config, tag_config);
509 /* Give the user a chance to override our setting for each port */
510 if (cvmx_override_ipd_port_setup)
511 cvmx_override_ipd_port_setup(ipd_port);
/* FCS (CRC) strip enable/disable for all ports of an interface.
 * PKND-only path: builds a 64-bit mask of the interface's port kinds,
 * ORs/ANDs it into PIP_SUB_PKIND_FCSX(0), then toggles crc_en in each
 * affected PIP_PRT_CFGX. On non-PKND chips it returns early (return on an
 * elided line after the feature check). */
517 * Enable or disable FCS stripping for all the ports on an interface.
520 * @param nports number of ports
521 * @param has_fcs 0 for disable and !0 for enable
523 static int cvmx_helper_fcs_op(int interface, int nports, int has_fcs)
528 cvmx_pip_sub_pkind_fcsx_t pkind_fcsx;
529 cvmx_pip_prt_cfgx_t port_cfg;
531 if (!octeon_has_feature(OCTEON_FEATURE_PKND))
535 for (index = 0; index < nports; index++)
536 port_bit |= ((uint64_t)1 << cvmx_helper_get_pknd(interface, index));
538 pkind_fcsx.u64 = cvmx_read_csr(CVMX_PIP_SUB_PKIND_FCSX(0));
540 pkind_fcsx.s.port_bit |= port_bit;
542 pkind_fcsx.s.port_bit &= ~port_bit;
543 cvmx_write_csr(CVMX_PIP_SUB_PKIND_FCSX(0), pkind_fcsx.u64);
545 for (pknd = 0; pknd < 64; pknd++)
547 if ((1ull << pknd) & port_bit)
549 port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(pknd));
550 port_cfg.s.crc_en = (has_fcs) ? 1 : 0;
551 cvmx_write_csr(CVMX_PIP_PRT_CFGX(pknd), port_cfg.u64);
/* Pure dispatch: maps the interface's mode to the matching per-mode
 * __cvmx_helper_*_enumerate() hardware port-count routine. No setup or
 * enable side effects beyond what those helpers do. */
559 * Determine the actual number of hardware ports connected to an
560 * interface. It doesn't setup the ports or enable them.
562 * @param interface Interface to enumerate
564 * @return The number of ports on the interface, negative on failure
566 int cvmx_helper_interface_enumerate(int interface)
568 switch (cvmx_helper_interface_get_mode(interface)) {
569 /* XAUI is a single high speed port */
570 case CVMX_HELPER_INTERFACE_MODE_XAUI:
571 case CVMX_HELPER_INTERFACE_MODE_RXAUI:
572 return __cvmx_helper_xaui_enumerate(interface);
573 /* RGMII/GMII/MII are all treated about the same. Most functions
574 refer to these ports as RGMII */
575 case CVMX_HELPER_INTERFACE_MODE_RGMII:
576 case CVMX_HELPER_INTERFACE_MODE_GMII:
577 return __cvmx_helper_rgmii_enumerate(interface);
578 /* SPI4 can have 1-16 ports depending on the device at the other end */
579 case CVMX_HELPER_INTERFACE_MODE_SPI:
580 return __cvmx_helper_spi_enumerate(interface);
581 /* SGMII can have 1-4 ports depending on how many are hooked up */
582 case CVMX_HELPER_INTERFACE_MODE_SGMII:
583 case CVMX_HELPER_INTERFACE_MODE_PICMG:
584 return __cvmx_helper_sgmii_enumerate(interface);
585 /* PCI target Network Packet Interface */
586 case CVMX_HELPER_INTERFACE_MODE_NPI:
587 return __cvmx_helper_npi_enumerate(interface);
588 /* Special loopback only ports. These are not the same
589 * as other ports in loopback mode */
590 case CVMX_HELPER_INTERFACE_MODE_LOOP:
591 return __cvmx_helper_loop_enumerate(interface);
592 /* SRIO has 2^N ports, where N is number of interfaces */
593 case CVMX_HELPER_INTERFACE_MODE_SRIO:
594 return __cvmx_helper_srio_enumerate(interface);
596 case CVMX_HELPER_INTERFACE_MODE_ILK:
597 return __cvmx_helper_ilk_enumerate(interface);
598 /* These types don't support ports to IPD/PKO */
599 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
600 case CVMX_HELPER_INTERFACE_MODE_PCIE:
/* Probe an interface: per-mode __cvmx_helper_*_probe() determines nports
 * and selects the PKO padding policy; then the board hook may adjust the
 * count, the interface bookkeeping is initialized, and FCS stripping is
 * applied. NOTE(review): at line 656 the SGMII case sets padding and then
 * appears to fall through into the PICMG case so both share the sgmii
 * probe — presumably intentional (PICMG keeps PADDING_NONE); confirm
 * against the full source since `break` lines are elided throughout. */
607 * This function probes an interface to determine the actual number of
608 * hardware ports connected to it. It does some setup the ports but
609 * doesn't enable them. The main goal here is to set the global
610 * interface_port_count[interface] correctly. Final hardware setup of
611 * the ports will be performed later.
613 * @param interface Interface to probe
615 * @return Zero on success, negative on failure
617 int cvmx_helper_interface_probe(int interface)
619 /* At this stage in the game we don't want packets to be moving yet.
620 The following probe calls should perform hardware setup
621 needed to determine port counts. Receive must still be disabled */
624 enum cvmx_pko_padding padding = CVMX_PKO_PADDING_NONE;
628 switch (cvmx_helper_interface_get_mode(interface))
630 /* These types don't support ports to IPD/PKO */
631 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
632 case CVMX_HELPER_INTERFACE_MODE_PCIE:
635 /* XAUI is a single high speed port */
636 case CVMX_HELPER_INTERFACE_MODE_XAUI:
637 case CVMX_HELPER_INTERFACE_MODE_RXAUI:
638 nports = __cvmx_helper_xaui_probe(interface);
640 padding = CVMX_PKO_PADDING_60;
642 /* RGMII/GMII/MII are all treated about the same. Most functions
643 refer to these ports as RGMII */
644 case CVMX_HELPER_INTERFACE_MODE_RGMII:
645 case CVMX_HELPER_INTERFACE_MODE_GMII:
646 nports = __cvmx_helper_rgmii_probe(interface);
647 padding = CVMX_PKO_PADDING_60;
649 /* SPI4 can have 1-16 ports depending on the device at the other end */
650 case CVMX_HELPER_INTERFACE_MODE_SPI:
651 nports = __cvmx_helper_spi_probe(interface);
652 padding = CVMX_PKO_PADDING_60;
654 /* SGMII can have 1-4 ports depending on how many are hooked up */
655 case CVMX_HELPER_INTERFACE_MODE_SGMII:
656 padding = CVMX_PKO_PADDING_60;
657 case CVMX_HELPER_INTERFACE_MODE_PICMG:
658 nports = __cvmx_helper_sgmii_probe(interface);
661 /* PCI target Network Packet Interface */
662 case CVMX_HELPER_INTERFACE_MODE_NPI:
663 nports = __cvmx_helper_npi_probe(interface);
665 /* Special loopback only ports. These are not the same as other ports
667 case CVMX_HELPER_INTERFACE_MODE_LOOP:
668 nports = __cvmx_helper_loop_probe(interface);
670 /* SRIO has 2^N ports, where N is number of interfaces */
671 case CVMX_HELPER_INTERFACE_MODE_SRIO:
672 nports = __cvmx_helper_srio_probe(interface);
674 case CVMX_HELPER_INTERFACE_MODE_ILK:
675 nports = __cvmx_helper_ilk_probe(interface);
677 padding = CVMX_PKO_PADDING_60;
684 if (!octeon_has_feature(OCTEON_FEATURE_PKND))
687 nports = __cvmx_helper_board_interface_probe(interface, nports);
688 __cvmx_helper_init_interface(interface, nports, has_fcs, padding);
689 cvmx_helper_fcs_op(interface, nports, has_fcs);
691 /* Make sure all global variables propagate to other cores */
/* Interface-level IPD/PIP setup: validates the cached port count, enables
 * the LOOP interface specially, then (in a loop whose header is on elided
 * lines) calls __cvmx_helper_port_setup_ipd for each ipd_port. */
700 * Setup the IPD/PIP for the ports on an interface. Packet
701 * classification and tagging are set for every port on the
702 * interface. The number of ports on the interface must already
705 * @param interface Interface to setup IPD/PIP for
707 * @return Zero on success, negative on failure
709 static int __cvmx_helper_interface_setup_ipd(int interface)
712 cvmx_helper_interface_mode_t mode;
713 int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
714 int num_ports = cvmx_helper_ports_on_interface(interface);
717 if (num_ports == CVMX_HELPER_CFG_INVALID_VALUE)
720 mode = cvmx_helper_interface_get_mode(interface);
722 if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
723 __cvmx_helper_loop_enable(interface);
726 if (octeon_has_feature(OCTEON_FEATURE_PKND))
728 if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII)
734 __cvmx_helper_port_setup_ipd(ipd_port);
/* Global IPD configuration: passes pool/skip sizes (in 8-byte words and
 * 128-byte cache lines, +8 accounting for the buffer next-pointer) to
 * cvmx_ipd_config. DRAM cache mode defaults to store-through (STT) unless
 * overridden at compile time. */
744 * Setup global setting for IPD/PIP not related to a specific
745 * interface or port. This must be called before IPD is enabled.
747 * @return Zero on success, negative on failure.
749 static int __cvmx_helper_global_setup_ipd(void)
751 #ifndef CVMX_HELPER_IPD_DRAM_MODE
752 #define CVMX_HELPER_IPD_DRAM_MODE CVMX_IPD_OPC_MODE_STT
754 /* Setup the global packet input options */
755 cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE/8,
756 CVMX_HELPER_FIRST_MBUFF_SKIP/8,
757 CVMX_HELPER_NOT_FIRST_MBUFF_SKIP/8,
758 (CVMX_HELPER_FIRST_MBUFF_SKIP+8) / 128, /* The +8 is to account for the next ptr */
759 (CVMX_HELPER_NOT_FIRST_MBUFF_SKIP+8) / 128, /* The +8 is to account for the next ptr */
761 CVMX_HELPER_IPD_DRAM_MODE,
/* Per-interface PKO setup: builds a uniform priority-8 vector for up to 16
 * queues per port, lets the user hook override it, then configures each
 * port's queue range via cvmx_pko_config_port (the per-port loop header is
 * on elided lines). */
769 * Setup the PKO for the ports on an interface. The number of
770 * queues per port and the priority of each PKO output queue
771 * is set here. PKO must be disabled when this function is called.
773 * @param interface Interface to setup PKO for
775 * @return Zero on success, negative on failure
777 static int __cvmx_helper_interface_setup_pko(int interface)
779 /* Each packet output queue has an associated priority. The higher the
780 priority, the more often it can send a packet. A priority of 8 means
781 it can send in all 8 rounds of contention. We're going to make each
782 queue one less than the last.
783 The vector of priorities has been extended to support CN5xxx CPUs,
784 where up to 16 queues can be associated to a port.
785 To keep backward compatibility we don't change the initial 8
786 priorities and replicate them in the second half.
787 With per-core PKO queues (PKO lockless operation) all queues have
788 the same priority. */
789 /* uint64_t priorities[16] = {8,7,6,5,4,3,2,1,8,7,6,5,4,3,2,1}; */
790 uint64_t priorities[16] = {[0 ... 15] = 8};
792 /* Setup the IPD/PIP and PKO for the ports discovered above. Here packet
793 classification, tagging and output priorities are set */
794 int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
795 int num_ports = cvmx_helper_ports_on_interface(interface);
798 /* Give the user a chance to override the per queue priorities */
799 if (cvmx_override_pko_queue_priority)
800 cvmx_override_pko_queue_priority(ipd_port, priorities);
802 cvmx_pko_config_port(ipd_port, cvmx_pko_get_base_queue_per_core(ipd_port, 0),
803 cvmx_pko_get_num_queues(ipd_port), priorities);
/* Global PKO setup: disables the tagwait FAU timeout, and on CN68XX
 * programs the per-size minimum packet lengths (59 = 60-byte minimum
 * frame minus 1, hardware convention — size1..size7 all set; size0 is on
 * elided lines). */
812 * Setup global setting for PKO not related to a specific
813 * interface or port. This must be called before PKO is enabled.
815 * @return Zero on success, negative on failure.
817 static int __cvmx_helper_global_setup_pko(void)
819 /* Disable tagwait FAU timeout. This needs to be done before anyone might
820 start packet output using tags */
821 cvmx_iob_fau_timeout_t fau_to;
823 fau_to.s.tout_val = 0xfff;
824 fau_to.s.tout_enb = 0;
825 cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
827 if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
828 cvmx_pko_reg_min_pkt_t min_pkt;
831 min_pkt.s.size1 = 59;
832 min_pkt.s.size2 = 59;
833 min_pkt.s.size3 = 59;
834 min_pkt.s.size4 = 59;
835 min_pkt.s.size5 = 59;
836 min_pkt.s.size6 = 59;
837 min_pkt.s.size7 = 59;
838 cvmx_write_csr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
/* Global backpressure policy: when compiled with
 * CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE, walks every interface and turns
 * off pause-frame generation (override mask 0xf) on the Ethernet-style
 * modes; PCIe/SRIO/ILK/NPI/LOOP/XAUI/RXAUI are left alone. */
847 * Setup global backpressure setting.
849 * @return Zero on success, negative on failure
851 static int __cvmx_helper_global_setup_backpressure(void)
853 #if CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
854 /* Disable backpressure if configured to do so */
855 /* Disable backpressure (pause frame) generation */
856 int num_interfaces = cvmx_helper_get_number_of_interfaces();
858 for (interface=0; interface<num_interfaces; interface++)
860 switch (cvmx_helper_interface_get_mode(interface))
862 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
863 case CVMX_HELPER_INTERFACE_MODE_PCIE:
864 case CVMX_HELPER_INTERFACE_MODE_SRIO:
865 case CVMX_HELPER_INTERFACE_MODE_ILK:
866 case CVMX_HELPER_INTERFACE_MODE_NPI:
867 case CVMX_HELPER_INTERFACE_MODE_LOOP:
868 case CVMX_HELPER_INTERFACE_MODE_XAUI:
869 case CVMX_HELPER_INTERFACE_MODE_RXAUI:
871 case CVMX_HELPER_INTERFACE_MODE_RGMII:
872 case CVMX_HELPER_INTERFACE_MODE_GMII:
873 case CVMX_HELPER_INTERFACE_MODE_SPI:
874 case CVMX_HELPER_INTERFACE_MODE_SGMII:
875 case CVMX_HELPER_INTERFACE_MODE_PICMG:
876 cvmx_gmx_set_backpressure_override(interface, 0xf)
880 //cvmx_dprintf("Disabling backpressure\n");
/* Hardware self-test for the per-port IPD backpressure erratum: disables
 * IPD error interrupts, enables per-port BP, injects a simulated page
 * count of 1000 on one port of each of the first two interfaces (port0 is
 * declared on an elided line; port1 = 16), waits, reads the BP state via
 * the mode-specific debug-select registers, then undoes the injection
 * (two's-complement -1000 masked to the 25-bit count field) and restores
 * interrupt state. Returns nonzero if either observed BP bitmask is not
 * exactly the expected single-port bit, i.e. the BP wires are misaligned. */
888 * Verify the per port IPD backpressure is aligned properly.
889 * @return Zero if working, non zero if misaligned
891 static int __cvmx_helper_backpressure_is_misaligned(void)
893 uint64_t ipd_int_enb;
894 cvmx_ipd_ctl_status_t ipd_reg;
898 const int port1 = 16;
899 cvmx_helper_interface_mode_t mode0 = cvmx_helper_interface_get_mode(0);
900 cvmx_helper_interface_mode_t mode1 = cvmx_helper_interface_get_mode(1);
902 /* Disable error interrupts while we check backpressure */
903 ipd_int_enb = cvmx_read_csr(CVMX_IPD_INT_ENB);
904 cvmx_write_csr(CVMX_IPD_INT_ENB, 0);
906 /* Enable per port backpressure */
907 ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
908 ipd_reg.s.pbp_en = 1;
909 cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
911 if (mode0 != CVMX_HELPER_INTERFACE_MODE_DISABLED)
913 /* Enable backpressure for port with a zero threshold */
914 cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port0), 1<<17);
915 /* Add 1000 to the page count to simulate packets coming in */
916 cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, (port0<<25) | 1000);
919 if (mode1 != CVMX_HELPER_INTERFACE_MODE_DISABLED)
921 /* Enable backpressure for port with a zero threshold */
922 cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port1), 1<<17);
923 /* Add 1000 to the page count to simulate packets coming in */
924 cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, (port1<<25) | 1000);
927 /* Wait 500 cycles for the BP to update */
930 /* Read the BP state from the debug select register */
/* NOTE(review): SPI debug data is active-low, hence the ~ before masking */
933 case CVMX_HELPER_INTERFACE_MODE_SPI:
934 cvmx_write_csr(CVMX_NPI_DBG_SELECT, 0x9004);
935 bp_status0 = cvmx_read_csr(CVMX_DBG_DATA);
936 bp_status0 = 0xffff & ~bp_status0;
938 case CVMX_HELPER_INTERFACE_MODE_RGMII:
939 case CVMX_HELPER_INTERFACE_MODE_GMII:
940 cvmx_write_csr(CVMX_NPI_DBG_SELECT, 0x0e00);
941 bp_status0 = 0xffff & cvmx_read_csr(CVMX_DBG_DATA);
943 case CVMX_HELPER_INTERFACE_MODE_XAUI:
944 case CVMX_HELPER_INTERFACE_MODE_SGMII:
945 case CVMX_HELPER_INTERFACE_MODE_PICMG:
946 cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, 0x0e00);
947 bp_status0 = 0xffff & cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
950 bp_status0 = 1<<port0;
954 /* Read the BP state from the debug select register */
957 case CVMX_HELPER_INTERFACE_MODE_SPI:
958 cvmx_write_csr(CVMX_NPI_DBG_SELECT, 0x9804);
959 bp_status1 = cvmx_read_csr(CVMX_DBG_DATA);
960 bp_status1 = 0xffff & ~bp_status1;
962 case CVMX_HELPER_INTERFACE_MODE_RGMII:
963 case CVMX_HELPER_INTERFACE_MODE_GMII:
964 cvmx_write_csr(CVMX_NPI_DBG_SELECT, 0x1600);
965 bp_status1 = 0xffff & cvmx_read_csr(CVMX_DBG_DATA);
967 case CVMX_HELPER_INTERFACE_MODE_XAUI:
968 case CVMX_HELPER_INTERFACE_MODE_SGMII:
969 case CVMX_HELPER_INTERFACE_MODE_PICMG:
970 cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, 0x1600);
971 bp_status1 = 0xffff & cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
974 bp_status1 = 1<<(port1-16);
978 if (mode0 != CVMX_HELPER_INTERFACE_MODE_DISABLED)
981 cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, (port0<<25) | (0x1ffffff & -1000));
982 cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port0), 0);
985 if (mode1 != CVMX_HELPER_INTERFACE_MODE_DISABLED)
988 cvmx_write_csr(CVMX_IPD_SUB_PORT_BP_PAGE_CNT, (port1<<25) | (0x1ffffff & -1000));
989 cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port1), 0);
992 /* Clear any error interrupts that might have been set */
993 cvmx_write_csr(CVMX_IPD_INT_SUM, 0x1f);
994 cvmx_write_csr(CVMX_IPD_INT_ENB, ipd_int_enb);
996 return ((bp_status0 != 1ull<<port0) || (bp_status1 != 1ull<<(port1-16)));
/* Mode dispatch to the per-mode __cvmx_helper_*_enable() routine, then ORs
 * in the board-level enable hook's result. DISABLED/PCIE/LOOP need no
 * hardware enable here. */
1002 * Enable packet input/output from the hardware. This function is
1003 * called after all internal setup is complete and IPD is enabled.
1004 * After this function completes, packets will be accepted from the
1005 * hardware ports. PKO should still be disabled to make sure packets
1006 * aren't sent out partially setup hardware.
1008 * @param interface Interface to enable
1010 * @return Zero on success, negative on failure
1012 static int __cvmx_helper_packet_hardware_enable(int interface)
1015 switch (cvmx_helper_interface_get_mode(interface))
1017 /* These types don't support ports to IPD/PKO */
1018 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
1019 case CVMX_HELPER_INTERFACE_MODE_PCIE:
1020 case CVMX_HELPER_INTERFACE_MODE_LOOP:
1023 /* XAUI is a single high speed port */
1024 case CVMX_HELPER_INTERFACE_MODE_XAUI:
1025 case CVMX_HELPER_INTERFACE_MODE_RXAUI:
1026 result = __cvmx_helper_xaui_enable(interface);
1028 /* RGMII/GMII/MII are all treated about the same. Most functions
1029 refer to these ports as RGMII */
1030 case CVMX_HELPER_INTERFACE_MODE_RGMII:
1031 case CVMX_HELPER_INTERFACE_MODE_GMII:
1032 result = __cvmx_helper_rgmii_enable(interface);
1034 /* SPI4 can have 1-16 ports depending on the device at the other end */
1035 case CVMX_HELPER_INTERFACE_MODE_SPI:
1036 result = __cvmx_helper_spi_enable(interface);
1038 /* SGMII can have 1-4 ports depending on how many are hooked up */
1039 case CVMX_HELPER_INTERFACE_MODE_SGMII:
1040 case CVMX_HELPER_INTERFACE_MODE_PICMG:
1041 result = __cvmx_helper_sgmii_enable(interface);
1043 /* PCI target Network Packet Interface */
1044 case CVMX_HELPER_INTERFACE_MODE_NPI:
1045 result = __cvmx_helper_npi_enable(interface);
1047 /* SRIO has 2^N ports, where N is number of interfaces */
1048 case CVMX_HELPER_INTERFACE_MODE_SRIO:
1049 result = __cvmx_helper_srio_enable(interface);
1051 case CVMX_HELPER_INTERFACE_MODE_ILK:
1052 result = __cvmx_helper_ilk_enable(interface);
1055 result |= __cvmx_helper_board_hardware_enable(interface);
/**
 * Called after all internal packet IO paths are setup. This
 * function enables IPD/PIP and begins packet input and output.
 *
 * @return Zero on success, negative on failure
 */
int cvmx_helper_ipd_and_packet_input_enable(void)
/* Time to enable hardware ports packet input and output. Note that at this
   point IPD/PIP must be fully functional and PKO must be disabled */
num_interfaces = cvmx_helper_get_number_of_interfaces();
for (interface=0; interface<num_interfaces; interface++)
if (cvmx_helper_ports_on_interface(interface) > 0)
__cvmx_helper_packet_hardware_enable(interface);
/* Finally enable PKO now that the entire path is up and running */
/* CN31XX/CN30XX pass 1 errata workaround: realign IPD pointers.
   Only applies to real hardware, never the simulator. */
if ((OCTEON_IS_MODEL(OCTEON_CN31XX_PASS1) || OCTEON_IS_MODEL(OCTEON_CN30XX_PASS1)) &&
    (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM))
__cvmx_helper_errata_fix_ipd_ptr_alignment();
1094 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1095 EXPORT_SYMBOL(cvmx_helper_ipd_and_packet_input_enable);
1098 #define __CVMX_SSO_RWQ_SIZE 256
/**
 * Initialize the CN68XX SSO (work scheduler) re-order work queue
 * (RWQ) buffers.
 *
 * Allocates a contiguous bootmem region and hands it to the SSO as
 * __CVMX_SSO_RWQ_SIZE-byte buffers: all but the last 8 go onto the
 * SSO free list; the final 8 seed the per-queue head/tail pointers.
 *
 * @param wqe_entries Number of work queue entries that must fit; the
 *                    buffer count is sized from this (see formula below).
 *
 * @return Zero on success (including non-CN68XX models, which need no
 *         setup); negative on allocation failure.
 */
int cvmx_helper_initialize_sso(int wqe_entries)
int cvm_oct_sso_number_rwq_bufs;
cvmx_sso_cfg_t sso_cfg;
cvmx_fpa_fpfx_marks_t fpa_marks;
/* Only the CN68XX has the SSO RWQ hardware; nothing to do elsewhere. */
if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
/*
 * CN68XX-P1 may reset with the wrong values, put in
 * the correct values.
 */
fpa_marks.s.fpf_wr = 0xa4;
fpa_marks.s.fpf_rd = 0x40;
cvmx_write_csr(CVMX_FPA_FPF8_MARKS, fpa_marks.u64);
/* ceil(wqe_entries / 26) buffers to hold the entries, plus 48 + 8
   extra (the trailing 8 seed the head/tail rings below). */
cvm_oct_sso_number_rwq_bufs = ((wqe_entries - 1) / 26) + 1 + 48 + 8;
mem = cvmx_bootmem_alloc(__CVMX_SSO_RWQ_SIZE * cvm_oct_sso_number_rwq_bufs, CVMX_CACHE_LINE_SIZE);
cvmx_dprintf("Out of memory initializing sso pool\n");
/* Make sure RWI/RWO is disabled. */
sso_cfg.u64 = cvmx_read_csr(CVMX_SSO_CFG);
cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);
/* Push all but the last 8 buffers onto the SSO free list. */
for (i = cvm_oct_sso_number_rwq_bufs - 8; i > 0; i--) {
cvmx_sso_rwq_psh_fptr_t fptr;
fptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_PSH_FPTR);
/* Free pointers are physical addresses in 128-byte units (>> 7). */
fptr.s.fptr = cvmx_ptr_to_phys(mem) >> 7;
cvmx_write_csr(CVMX_SSO_RWQ_PSH_FPTR, fptr.u64);
mem = mem + __CVMX_SSO_RWQ_SIZE;
/* Seed each of the 8 queues with one buffer: head == tail == empty ring. */
for (i = 0; i < 8; i++) {
cvmx_sso_rwq_head_ptrx_t head_ptr;
cvmx_sso_rwq_tail_ptrx_t tail_ptr;
head_ptr.s.ptr = cvmx_ptr_to_phys(mem) >> 7;
tail_ptr.s.ptr = head_ptr.s.ptr;
cvmx_write_csr(CVMX_SSO_RWQ_HEAD_PTRX(i), head_ptr.u64);
cvmx_write_csr(CVMX_SSO_RWQ_TAIL_PTRX(i), tail_ptr.u64);
mem = mem + __CVMX_SSO_RWQ_SIZE;
/* Finally enable RWQ/RWIO (clear the bypass-disable bits) and apply
   the configured don't-write-back policy. */
sso_cfg.u64 = cvmx_read_csr(CVMX_SSO_CFG);
sso_cfg.s.dwb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB);
sso_cfg.s.rwq_byp_dis = 0;
sso_cfg.s.rwio_byp_dis = 0;
cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);
/**
 * Undo cvmx_helper_initialize_sso(): drain all RWQ buffers back out of
 * the CN68XX SSO so the hardware no longer references the memory.
 * The drained bootmem buffers are intentionally leaked (see comments).
 *
 * @return Zero on success (including non-CN68XX models).
 */
int cvmx_helper_uninitialize_sso(void)
cvmx_fpa_quex_available_t queue_available;
cvmx_sso_cfg_t sso_cfg;
cvmx_sso_rwq_pop_fptr_t pop_fptr;
cvmx_sso_rwq_psh_fptr_t fptr;
cvmx_sso_fpage_cnt_t fpage_cnt;
int num_to_transfer, i;
/* Only the CN68XX has the SSO RWQ hardware; nothing to do elsewhere. */
if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
/* Disable the RWQ bypass so buffer pops go through the queues. */
sso_cfg.u64 = cvmx_read_csr(CVMX_SSO_CFG);
sso_cfg.s.rwq_byp_dis = 1;
cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);
cvmx_read_csr(CVMX_SSO_CFG); /* read back to ensure the write landed */
queue_available.u64 = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(8));
/* Make CVMX_FPA_QUEX_AVAILABLE(8) % 16 == 0*/
for (num_to_transfer = (16 - queue_available.s.que_siz) % 16;
     num_to_transfer > 0; num_to_transfer--) {
/* Spin until a valid free pointer pops out. */
pop_fptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_POP_FPTR);
} while (!pop_fptr.s.val);
/* Push the popped buffer straight back, migrating it onto queue 8. */
fptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_PSH_FPTR);
fptr.s.fptr = pop_fptr.s.fptr;
cvmx_write_csr(CVMX_SSO_RWQ_PSH_FPTR, fptr.u64);
cvmx_read_csr(CVMX_SSO_CFG);
queue_available.u64 = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(8));
} while (queue_available.s.que_siz % 16);
/* Re-enable the RWQ path before the final drain. */
sso_cfg.s.rwq_byp_dis = 0;
cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);
/* Each of the 8 queues should be fully drained: head must equal tail. */
for (i = 0; i < 8; i++) {
cvmx_sso_rwq_head_ptrx_t head_ptr;
cvmx_sso_rwq_tail_ptrx_t tail_ptr;
head_ptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_HEAD_PTRX(i));
tail_ptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_TAIL_PTRX(i));
if (head_ptr.s.ptr != tail_ptr.s.ptr) {
cvmx_dprintf("head_ptr.s.ptr != tail_ptr.s.ptr, idx: %d\n", i);
/* Pointers are stored in 128-byte units; << 7 recovers the address. */
mem = cvmx_phys_to_ptr(((uint64_t)head_ptr.s.ptr) << 7);
/* Leak the memory */
/* Pop remaining free-list buffers until the free page count is zero. */
pop_fptr.u64 = cvmx_read_csr(CVMX_SSO_RWQ_POP_FPTR);
if (pop_fptr.s.val) {
mem = cvmx_phys_to_ptr(((uint64_t)pop_fptr.s.fptr) << 7);
/* Leak the memory */
} while (pop_fptr.s.val);
fpage_cnt.u64 = cvmx_read_csr(CVMX_SSO_FPAGE_CNT);
} while (fpage_cnt.s.fpage_cnt);
sso_cfg.s.rwq_byp_dis = 0;
cvmx_write_csr(CVMX_SSO_CFG, sso_cfg.u64);
/**
 * Initialize the PIP, IPD, and PKO hardware to support
 * simple priority based queues for the ethernet ports. Each
 * port is configured with a number of priority queues based
 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
 * priority than the previous.
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_global(void)
cvmx_l2c_cfg_t l2c_cfg;
cvmx_smix_en_t smix_en;
const int num_interfaces = cvmx_helper_get_number_of_interfaces();
/* CN52XX pass 1: Due to a bug in 2nd order CDR, it needs to be disabled */
if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
__cvmx_helper_errata_qlm_disable_2nd_order_cdr(1);
/* Tell L2 to give the IOB statically higher priority compared to the
   cores. This avoids conditions where IO blocks might be starved under
   very high L2 loads */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
/* CN6XXX/CNF7XXX: arbitration knobs live in L2C_CTL. */
cvmx_l2c_ctl_t l2c_ctl;
l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
l2c_ctl.s.rsp_arb_mode = 1;
l2c_ctl.s.xmc_arb_mode = 0;
cvmx_write_csr(CVMX_L2C_CTL, l2c_ctl.u64);
/* Older models: arbitration knobs live in L2C_CFG instead.
   NOTE(review): the enclosing else is elided in this excerpt. */
l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
l2c_cfg.s.lrf_arb_mode = 0;
l2c_cfg.s.rfb_arb_mode = 0;
cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);
/* PHY/MDIO setup is skipped entirely on the simulator. */
if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
/* Newer chips have more than one SMI/MDIO interface */
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
else if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)
         && !OCTEON_IS_MODEL(OCTEON_CN58XX)
         && !OCTEON_IS_MODEL(OCTEON_CN50XX))
/* NOTE(review): the smi_inf assignments for the branches above are
   elided in this excerpt — confirm counts against full source. */
for (i = 0; i < smi_inf; i++)
/* Make sure SMI/MDIO is enabled so we can query PHYs */
smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(i));
cvmx_write_csr(CVMX_SMIX_EN(i), smix_en.u64);
/* Load the helper port configuration database before probing. */
__cvmx_helper_cfg_init();
/* Probe every interface to determine its mode and port count. */
for (interface=0; interface<num_interfaces; interface++)
result |= cvmx_helper_interface_probe(interface);
cvmx_pko_initialize_global();
for (interface=0; interface<num_interfaces; interface++)
if (cvmx_helper_ports_on_interface(interface) > 0)
cvmx_dprintf("Interface %d has %d ports (%s)\n",
             interface, cvmx_helper_ports_on_interface(interface),
             cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(interface)));
result |= __cvmx_helper_interface_setup_ipd(interface);
/* The per-interface PKO setup does not apply to CN68XX. */
if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
result |= __cvmx_helper_interface_setup_pko(interface);
result |= __cvmx_helper_global_setup_ipd();
result |= __cvmx_helper_global_setup_pko();
/* Enable any flow control and backpressure */
result |= __cvmx_helper_global_setup_backpressure();
/* Optionally enable packet input now — compile-time policy. */
#if CVMX_HELPER_ENABLE_IPD
result |= cvmx_helper_ipd_and_packet_input_enable();
1339 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1340 EXPORT_SYMBOL(cvmx_helper_initialize_packet_io_global);
/**
 * Does core local initialization for packet io
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_local(void)
/* Per-core setup currently consists only of PKO's local state. */
return cvmx_pko_initialize_local();
/**
 * wait for the pko queue to drain
 *
 * Polls the PKO command queue length until it reaches zero or a
 * deadline (measured in core clock cycles) expires.
 *
 * @param queue a valid pko queue
 * @return count is the length of the queue after calling this
 *         (zero means fully drained; non-zero means it timed out)
 */
static int cvmx_helper_wait_pko_queue_drain(int queue)
const int timeout = 5; /* Wait up to 5 seconds for timeouts */
uint64_t start_cycle, stop_cycle;
count = cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(queue));
/* Convert the timeout into an absolute core-cycle deadline. */
start_cycle = cvmx_get_cycle();
stop_cycle = start_cycle + cvmx_clock_get_rate(CVMX_CLOCK_CORE) * timeout;
while (count && (cvmx_get_cycle() < stop_cycle))
count = cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(queue));
/* Singly-linked list node used to temporarily hold FPA pool-0 buffers
   while IPD/PIP are reset (see cvmx_helper_shutdown_packet_io_global,
   steps 9 and 11). The buffer's own memory doubles as the node. */
struct cvmx_buffer_list {
    struct cvmx_buffer_list *next;
/**
 * Undo the initialization performed in
 * cvmx_helper_initialize_packet_io_global(). After calling this routine and the
 * local version on each core, packet IO for Octeon will be disabled and placed
 * in the initial reset state. It will then be safe to call the initialize
 * later on. Note that this routine does not empty the FPA pools. It frees all
 * buffers used by the packet IO hardware to the FPA so a function emptying the
 * FPA after shutdown should find all packet buffers in the FPA.
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_helper_shutdown_packet_io_global(void)
const int timeout = 5; /* Wait up to 5 seconds for timeouts */
/* Holding list for FPA pool-0 buffers drained in step 9, restored in step 11. */
struct cvmx_buffer_list *pool0_buffers;
struct cvmx_buffer_list *pool0_buffers_tail;
/* Step 1: Disable all backpressure */
for (interface=0; interface<CVMX_HELPER_MAX_GMX; interface++)
if (cvmx_helper_interface_get_mode(interface) != CVMX_HELPER_INTERFACE_MODE_DISABLED)
cvmx_gmx_set_backpressure_override(interface, 0xf);
/* Step 2: Wait for the PKO queues to drain */
if (octeon_has_feature(OCTEON_FEATURE_PKND))
/* PKND chips: queues are numbered globally, so walk them all directly. */
int queue, max_queue;
max_queue = __cvmx_helper_cfg_pko_max_queue();
for (queue = 0; queue < max_queue; queue++)
if (cvmx_helper_wait_pko_queue_drain(queue))
/* Non-PKND chips: walk each port's queue range per interface.
   NOTE(review): the else introducing this path is elided in this excerpt. */
num_interfaces = cvmx_helper_get_number_of_interfaces();
for (interface=0; interface<num_interfaces; interface++)
num_ports = cvmx_helper_ports_on_interface(interface);
for (index=0; index<num_ports; index++)
int pko_port = cvmx_helper_get_ipd_port(interface, index);
int queue = cvmx_pko_get_base_queue(pko_port);
int max_queue = queue + cvmx_pko_get_num_queues(pko_port);
while (queue < max_queue)
if (cvmx_helper_wait_pko_queue_drain(queue))
/* Step 3: Disable TX and RX on all ports */
for (interface=0; interface<CVMX_HELPER_MAX_GMX; interface++)
switch (cvmx_helper_interface_get_mode(interface))
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
/* Not a packet interface */
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_SRIO:
case CVMX_HELPER_INTERFACE_MODE_ILK:
/* We don't handle the NPI/NPEI/SRIO packet engines. The caller
   must know these are idle */
case CVMX_HELPER_INTERFACE_MODE_LOOP:
/* Nothing needed. Once PKO is idle, the loopback devices
   are idle too. */
case CVMX_HELPER_INTERFACE_MODE_SPI:
/* SPI cannot be disabled from Octeon. It is the responsibility
   of the caller to make sure SPI is idle before doing
   shutdown. */
/* Fall through and do the same processing as RGMII/GMII */
case CVMX_HELPER_INTERFACE_MODE_GMII:
case CVMX_HELPER_INTERFACE_MODE_RGMII:
/* Disable outermost RX at the ASX block */
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), 0);
num_ports = cvmx_helper_ports_on_interface(interface);
for (index=0; index<num_ports; index++)
cvmx_gmxx_prtx_cfg_t gmx_cfg;
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
/* Poll the GMX state machine waiting for it to become idle */
cvmx_write_csr(CVMX_NPI_DBG_SELECT, interface*0x800 + index*0x100 + 0x880);
if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&7, ==, 0, timeout*1000000))
cvmx_dprintf("GMX RX path timeout waiting for idle\n");
if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&0xf, ==, 0, timeout*1000000))
cvmx_dprintf("GMX TX path timeout waiting for idle\n");
/* Disable outermost TX at the ASX block */
cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), 0);
/* Disable interrupts for interface */
cvmx_write_csr(CVMX_ASXX_INT_EN(interface), 0);
cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0);
case CVMX_HELPER_INTERFACE_MODE_XAUI:
case CVMX_HELPER_INTERFACE_MODE_RXAUI:
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
num_ports = cvmx_helper_ports_on_interface(interface);
for (index=0; index<num_ports; index++)
cvmx_gmxx_prtx_cfg_t gmx_cfg;
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
/* These modes expose rx_idle/tx_idle directly in GMX_PRT_CFG. */
if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface), cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1, timeout*1000000))
cvmx_dprintf("GMX RX path timeout waiting for idle\n");
if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface), cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1, timeout*1000000))
cvmx_dprintf("GMX TX path timeout waiting for idle\n");
/* Step 4: Retrieve all packets from the POW and free them */
while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT)))
cvmx_helper_free_packet_data(work);
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 0);
/* Step 4b: Special workaround for pass 2 errata */
if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
cvmx_ipd_ptr_count_t ipd_cnt;
ipd_cnt.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
/* Counters must be a multiple of 8; to_add is the shortfall. */
to_add = (ipd_cnt.s.wqev_cnt + ipd_cnt.s.wqe_pcnt) & 0x7;
cvmx_dprintf("Aligning CN38XX pass 2 IPD counters\n");
/* Pick an RGMII port to loop alignment packets through. */
if (cvmx_helper_interface_get_mode(0) == CVMX_HELPER_INTERFACE_MODE_RGMII)
else if (cvmx_helper_interface_get_mode(1) == CVMX_HELPER_INTERFACE_MODE_RGMII)
char *buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
int queue = cvmx_pko_get_base_queue(port);
cvmx_pko_command_word0_t pko_command;
cvmx_buf_ptr_t packet;
uint64_t start_cycle;
uint64_t stop_cycle;
/* Populate a minimal packet */
memset(buffer, 0xff, 6); /* broadcast DMAC */
memset(buffer+6, 0, 54); /* rest zeroed: 60-byte minimum frame */
pko_command.u64 = 0;
pko_command.s.dontfree = 1; /* we free the buffer ourselves below */
pko_command.s.total_bytes = 60;
pko_command.s.segs = 1;
packet.s.addr = cvmx_ptr_to_phys(buffer);
packet.s.size = CVMX_FPA_PACKET_POOL_SIZE;
/* Internal loopback so the packet comes right back into IPD. */
__cvmx_helper_rgmii_configure_loopback(port, 1, 0);
cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);
if (cvmx_pko_send_packet_finish(port, queue, pko_command, packet, CVMX_PKO_LOCK_CMD_QUEUE))
cvmx_dprintf("ERROR: Unable to align IPD counters (PKO failed)\n");
cvmx_fpa_free(buffer, CVMX_FPA_PACKET_POOL, 0);
/* Wait for the packets to loop back */
start_cycle = cvmx_get_cycle();
stop_cycle = start_cycle + cvmx_clock_get_rate(CVMX_CLOCK_CORE) * timeout;
while (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(queue)) &&
       (cvmx_get_cycle() < stop_cycle))
__cvmx_helper_rgmii_configure_loopback(port, 0, 0);
cvmx_dprintf("ERROR: Unable to align IPD counters (Packet pool empty)\n");
cvmx_dprintf("ERROR: Unable to align IPD counters\n");
/* Step 6: Drain all prefetched buffers from IPD/PIP. Note that IPD/PIP
   have not been reset yet */
__cvmx_ipd_free_ptr();
/* Step 7: Free the PKO command buffers and put PKO in reset */
cvmx_pko_shutdown();
/* Step 8: Disable MAC address filtering */
for (interface=0; interface<CVMX_HELPER_MAX_GMX; interface++)
switch (cvmx_helper_interface_get_mode(interface))
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
case CVMX_HELPER_INTERFACE_MODE_SRIO:
case CVMX_HELPER_INTERFACE_MODE_ILK:
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_LOOP:
case CVMX_HELPER_INTERFACE_MODE_XAUI:
case CVMX_HELPER_INTERFACE_MODE_RXAUI:
case CVMX_HELPER_INTERFACE_MODE_GMII:
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_SPI:
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
num_ports = cvmx_helper_ports_on_interface(interface);
for (index=0; index<num_ports; index++)
/* Accept everything (CAM off), clear all CAM match entries. */
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface), 1);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 0);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), 0);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), 0);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), 0);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), 0);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), 0);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), 0);
/* Step 9: Drain all FPA buffers out of pool 0 before we reset
 * IPD/PIP. This is needed to keep IPD_QUE0_FREE_PAGE_CNT in
 * sync. We temporarily keep the buffers in the pool0_buffers
 * list. */
pool0_buffers = NULL;
pool0_buffers_tail = NULL;
struct cvmx_buffer_list *buffer = cvmx_fpa_alloc(0);
/* Each drained buffer becomes its own list node (see struct above). */
buffer->next = NULL;
if (pool0_buffers == NULL)
pool0_buffers = buffer;
pool0_buffers_tail->next = buffer;
pool0_buffers_tail = buffer;
/* Step 10: Reset IPD and PIP */
cvmx_ipd_ctl_status_t ipd_ctl_status;
ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
ipd_ctl_status.s.reset = 1;
cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
    (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
/* only try 1000 times. Normally if this works it will happen in
** the first 50 loops. */
int max_loops = 1000;
/* Per port backpressure counters can get misaligned after an
   IPD reset. This code realigns them by performing repeated
   resets. See IPD-13473 */
if (__cvmx_helper_backpressure_is_misaligned())
cvmx_dprintf("Starting to align per port backpressure counters.\n");
while (__cvmx_helper_backpressure_is_misaligned() && (loop++ < max_loops))
cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
if (loop < max_loops)
cvmx_dprintf("Completed aligning per port backpressure counters (%d loops).\n", loop);
cvmx_dprintf("ERROR: unable to align per port backpressure counters.\n");
/* For now, don't hang.... */
/* PIP_SFT_RST not present in CN38XXp{1,2} */
if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
cvmx_pip_sft_rst_t pip_sft_rst;
pip_sft_rst.u64 = cvmx_read_csr(CVMX_PIP_SFT_RST);
pip_sft_rst.s.rst = 1;
cvmx_write_csr(CVMX_PIP_SFT_RST, pip_sft_rst.u64);
/* Make sure IPD has finished reset. */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
if (CVMX_WAIT_FOR_FIELD64(CVMX_IPD_CTL_STATUS, cvmx_ipd_ctl_status_t, rst_done, ==, 0, 1000))
cvmx_dprintf("IPD reset timeout waiting for idle\n");
/* Step 11: Restore the FPA buffers into pool 0 */
while (pool0_buffers) {
struct cvmx_buffer_list *n = pool0_buffers->next;
cvmx_fpa_free(pool0_buffers, 0, 0);
/* Step 12: Release interface structures */
__cvmx_helper_shutdown_interfaces();
1752 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1753 EXPORT_SYMBOL(cvmx_helper_shutdown_packet_io_global);
/**
 * Does core local shutdown of packet io
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_shutdown_packet_io_local(void)
/* Currently there is nothing to do per core. This may change in
   the future. */
/**
 * Auto configure an IPD/PKO port link state and speed. This
 * function basically does the equivalent of:
 * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
 *
 * @param ipd_port IPD/PKO port to auto configure
 *
 * @return Link state after configure
 */
cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port)
cvmx_helper_link_info_t link_info;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
/* Guard against indexes beyond the ports actually on this interface. */
if (index >= cvmx_helper_ports_on_interface(interface))
link_info = cvmx_helper_link_get(ipd_port);
/* Compare against the cached link state — avoids reconfiguring when
   nothing changed since the last call. */
if (link_info.u64 == (__cvmx_helper_get_link_info(interface, index)).u64)
/* Error-group reporting is only managed in standalone (non-kernel) builds. */
#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
if (!link_info.s.link_up)
cvmx_error_disable_group(CVMX_ERROR_GROUP_ETHERNET, ipd_port);
/* If we fail to set the link speed, port_link_info will not change */
cvmx_helper_link_set(ipd_port, link_info);
#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
if (link_info.s.link_up)
cvmx_error_enable_group(CVMX_ERROR_GROUP_ETHERNET, ipd_port);
1811 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1812 EXPORT_SYMBOL(cvmx_helper_link_autoconf);
/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
cvmx_helper_link_info_t result;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
/* The default result will be a down link unless the code below
   changes it */
if (index >= cvmx_helper_ports_on_interface(interface))
/* Dispatch to the per-mode link query helper. */
switch (cvmx_helper_interface_get_mode(interface))
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
/* Network links are not supported */
case CVMX_HELPER_INTERFACE_MODE_XAUI:
case CVMX_HELPER_INTERFACE_MODE_RXAUI:
result = __cvmx_helper_xaui_link_get(ipd_port);
case CVMX_HELPER_INTERFACE_MODE_GMII:
result = __cvmx_helper_rgmii_link_get(ipd_port);
/* NOTE(review): the condition guarding this forced 1Gbps full-duplex
   up-state is elided in this excerpt — confirm against full source. */
result.s.full_duplex = 1;
result.s.link_up = 1;
result.s.speed = 1000;
case CVMX_HELPER_INTERFACE_MODE_RGMII:
result = __cvmx_helper_rgmii_link_get(ipd_port);
case CVMX_HELPER_INTERFACE_MODE_SPI:
result = __cvmx_helper_spi_link_get(ipd_port);
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
result = __cvmx_helper_sgmii_link_get(ipd_port);
case CVMX_HELPER_INTERFACE_MODE_SRIO:
result = __cvmx_helper_srio_link_get(ipd_port);
case CVMX_HELPER_INTERFACE_MODE_ILK:
result = __cvmx_helper_ilk_link_get(ipd_port);
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_LOOP:
/* Network links are not supported */
1881 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1882 EXPORT_SYMBOL(cvmx_helper_link_get);
/**
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param ipd_port  IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
/* Guard against indexes beyond the ports actually on this interface. */
if (index >= cvmx_helper_ports_on_interface(interface))
/* Dispatch to the per-mode link configuration helper. */
switch (cvmx_helper_interface_get_mode(interface))
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
case CVMX_HELPER_INTERFACE_MODE_XAUI:
case CVMX_HELPER_INTERFACE_MODE_RXAUI:
result = __cvmx_helper_xaui_link_set(ipd_port, link_info);
/* RGMII/GMII/MII are all treated about the same. Most functions
   refer to these ports as RGMII */
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
result = __cvmx_helper_rgmii_link_set(ipd_port, link_info);
case CVMX_HELPER_INTERFACE_MODE_SPI:
result = __cvmx_helper_spi_link_set(ipd_port, link_info);
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
result = __cvmx_helper_sgmii_link_set(ipd_port, link_info);
case CVMX_HELPER_INTERFACE_MODE_SRIO:
result = __cvmx_helper_srio_link_set(ipd_port, link_info);
case CVMX_HELPER_INTERFACE_MODE_ILK:
result = __cvmx_helper_ilk_link_set(ipd_port, link_info);
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_LOOP:
/* Set the port_link_info here so that the link status is updated
   no matter how cvmx_helper_link_set is called. We don't change
   the value if link_set failed */
__cvmx_helper_set_link_info(interface, index, link_info);
1946 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1947 EXPORT_SYMBOL(cvmx_helper_link_set);
/**
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to sent out again.
 *
 * @param ipd_port IPD/PKO port to loopback.
 * @param enable_internal
 *                 Non zero if you want internal loopback
 * @param enable_external
 *                 Non zero if you want external loopback
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_helper_configure_loopback(int ipd_port, int enable_internal, int enable_external)
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
/* Guard against indexes beyond the ports actually on this interface. */
if (index >= cvmx_helper_ports_on_interface(interface))
switch (cvmx_helper_interface_get_mode(interface))
/* Loopback is not supported for these interface types. */
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
case CVMX_HELPER_INTERFACE_MODE_SRIO:
case CVMX_HELPER_INTERFACE_MODE_ILK:
case CVMX_HELPER_INTERFACE_MODE_SPI:
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_LOOP:
case CVMX_HELPER_INTERFACE_MODE_XAUI:
case CVMX_HELPER_INTERFACE_MODE_RXAUI:
result = __cvmx_helper_xaui_configure_loopback(ipd_port, enable_internal, enable_external);
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
result = __cvmx_helper_rgmii_configure_loopback(ipd_port, enable_internal, enable_external);
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
result = __cvmx_helper_sgmii_configure_loopback(ipd_port, enable_internal, enable_external);
1999 #endif /* CVMX_ENABLE_PKO_FUNCTIONS */